Columns (string length ranges as reported by the dataset viewer):

  hip_filename:   string, 5 to 84 characters
  hip_content:    string, 79 to 9.69M characters
  cuda_filename:  string, 4 to 83 characters
  cuda_content:   string, 19 to 9.69M characters
e00d829be3ed605223af58ba86183e22358dab40.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmgeelltmv.cu normal z -> d, Sat Nov 15 19:54:21 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void dmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy) { extern __shared__ double dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors hipLaunchKernelGGL(( dmgeelltmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
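As its doc comment states, the dmgeelltmv kernel in this record computes Y = alpha*A*X + beta*Y for num_vecs vectors, with A stored column-major in ELL format (entry n of row r sits at index n*num_rows + r). Restating the per-row update the kernel performs for each vector i (zero-valued padding entries are simply skipped):

    dy[row + i*num_cols] = alpha * Σ_{n=0}^{num_cols_per_row-1} dval[n*num_rows + row] * dx[dcolind[n*num_rows + row] + i*num_cols]
                           + beta * dy[row + i*num_cols]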
e00d829be3ed605223af58ba86183e22358dab40.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @generated from zmgeelltmv.cu normal z -> d, Sat Nov 15 19:54:21 2014 */ #include "common_magma.h" #if (GPUSHMEM < 200) #define BLOCK_SIZE 128 #else #define BLOCK_SIZE 512 #endif __global__ void dmgeelltmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy) { extern __shared__ double dot[]; int row = blockDim.x * blockIdx.x + threadIdx.x ; if(row < num_rows ){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x+ i*blockDim.x ] = MAGMA_D_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row ; n ++){ int col = dcolind [ num_rows * n + row ]; double val = dval [ num_rows * n + row ]; if( val != 0){ for( int i=0; i<num_vecs; i++ ) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++ ) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i*num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELL. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha double scalar multiplier @param[in] dval magmaDouble_ptr array containing values of A in ELL @param[in] dcolind magmaIndex_ptr columnindices of A in ELL @param[in] dx magmaDouble_ptr input vector x @param[in] beta double scalar multiplier @param[out] dy magmaDouble_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_dblas ********************************************************************/ extern "C" magma_int_t magma_dmgeelltmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, double alpha, magmaDouble_ptr dval, magmaIndex_ptr dcolind, magmaDouble_ptr dx, double beta, magmaDouble_ptr dy, magma_queue_t queue ) { dim3 grid( (m+BLOCK_SIZE-1)/BLOCK_SIZE, 1, 1); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( double ); // num_vecs vectors dmgeelltmv_kernel<<< grid, threads, MEM_SIZE, queue >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
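Apart from the hipify banner and the added hip_runtime.h include, the .hip record above differs from this .cu original only in the kernel-launch syntax. A minimal side-by-side sketch of that mapping, using the names from this pair (the hipify output wraps the kernel name in an extra pair of parentheses, which is purely cosmetic):

    // CUDA triple-chevron launch, as in the .cu file:
    dmgeelltmv_kernel<<< grid, threads, MEM_SIZE, queue >>>
        ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy );

    // Equivalent HIP launch emitted by hipify, as in the .hip file:
    hipLaunchKernelGGL(dmgeelltmv_kernel, dim3(grid), dim3(threads), MEM_SIZE, queue,
                       m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy);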
c32be869186cef83af8c00889c65b24321ecba58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(hipMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(hipGetLastError()); */ static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void vectorTransformKernel(float* A, float* B, float* Result) { // insert operation here int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < 1000000) Result[i] = Result[i] + A[i] * B[i]; } void vectorTransformCuda(int n, float* a, float* b, float* result) { int threadBlockSize = 512; // allocate the vectors on the GPU float* deviceA = NULL; checkCudaCall(hipMalloc((void **) &deviceA, n * sizeof(float))); if (deviceA == NULL) { cout << "could not allocate memory!" << endl; return; } float* deviceB = NULL; checkCudaCall(hipMalloc((void **) &deviceB, n * sizeof(float))); if (deviceB == NULL) { checkCudaCall(hipFree(deviceA)); cout << "could not allocate memory!" << endl; return; } float* deviceResult = NULL; checkCudaCall(hipMalloc((void **) &deviceResult, n * sizeof(float))); if (deviceResult == NULL) { checkCudaCall(hipFree(deviceA)); checkCudaCall(hipFree(deviceB)); cout << "could not allocate memory!" << endl; return; } timer kernelTime1 = timer("kernelTime1"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(hipMemcpy(deviceA, a, n*sizeof(float), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(deviceB, b, n*sizeof(float), hipMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); hipLaunchKernelGGL(( vectorTransformKernel), dim3(977), dim3(1024), 0, 0, deviceA, deviceB, deviceResult); //vectorTransformKernel<<<977,1024>>>(deviceA, deviceB, deviceResult); hipLaunchKernelGGL(( vectorTransformKernel), dim3(977), dim3(1024), 0, 0, deviceA, deviceB, deviceResult); hipLaunchKernelGGL(( vectorTransformKernel), dim3(977), dim3(1024), 0, 0, deviceA, deviceB, deviceResult); hipLaunchKernelGGL(( vectorTransformKernel), dim3(977), dim3(1024), 0, 0, deviceA, deviceB, deviceResult); hipLaunchKernelGGL(( vectorTransformKernel), dim3(977), dim3(1024), 0, 0, deviceA, deviceB, deviceResult); hipDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(hipGetLastError()); // copy result back memoryTime.start(); checkCudaCall(hipMemcpy(result, deviceResult, n * sizeof(float), hipMemcpyDeviceToHost)); checkCudaCall(hipMemcpy(b, deviceB, n * sizeof(float), hipMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(hipFree(deviceA)); checkCudaCall(hipFree(deviceB)); checkCudaCall(hipFree(deviceResult)); cout << "vector-transform (kernel): \t\t" << kernelTime1 << endl; cout << "vector-transform (memory): \t\t" << memoryTime << endl; } int vectorTransformSeq(int n, float* a, float* b, float* result) { int i,j; timer sequentialTime = timer("Sequential"); sequentialTime.start(); for (j=0; j<5; j++) { for (i=0; i<n; i++) { result[i] = result[i]+a[i]*b[i]; } } sequentialTime.stop(); cout << "vector-transform (sequential): \t\t" << sequentialTime << endl; } int main(int argc, char* argv[]) { int n = 1000000; float* a = new float[n]; float* b = new float[n]; float* result 
= new float[n]; float* result_s = new float[n]; if (argc > 1) n = atoi(argv[1]); cout << "Iteratively transform vector A with vector B of " << n << " integer elements." << endl; // initialize the vectors. for(int i=0; i<n; i++) { a[i] = i; b[i] = 0.1*i; result[i]=0; result_s[i]=0; } vectorTransformSeq(n, a, b, result_s); vectorTransformCuda(n, a, b, result); // verify the resuls for(int i=0; i<n; i++) { //if(result[i] != n /*2*i*/) { //cout<<result[i]<<"\t"<<result_s[i]<<"\n"; if (fabs(result[i]-result_s[i])<0) { cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl; exit(1); } } cout << "results OK!" << endl; delete[] a; delete[] b; delete[] result; return 0; }
c32be869186cef83af8c00889c65b24321ecba58.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <iostream> #include "timer.h" using namespace std; /* Utility function, use to do error checking. Use this function like this: checkCudaCall(cudaMalloc((void **) &deviceRGB, imgS * sizeof(color_t))); And to check the result of a kernel invocation: checkCudaCall(cudaGetLastError()); */ static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void vectorTransformKernel(float* A, float* B, float* Result) { // insert operation here int i = threadIdx.x + blockDim.x * blockIdx.x; if(i < 1000000) Result[i] = Result[i] + A[i] * B[i]; } void vectorTransformCuda(int n, float* a, float* b, float* result) { int threadBlockSize = 512; // allocate the vectors on the GPU float* deviceA = NULL; checkCudaCall(cudaMalloc((void **) &deviceA, n * sizeof(float))); if (deviceA == NULL) { cout << "could not allocate memory!" << endl; return; } float* deviceB = NULL; checkCudaCall(cudaMalloc((void **) &deviceB, n * sizeof(float))); if (deviceB == NULL) { checkCudaCall(cudaFree(deviceA)); cout << "could not allocate memory!" << endl; return; } float* deviceResult = NULL; checkCudaCall(cudaMalloc((void **) &deviceResult, n * sizeof(float))); if (deviceResult == NULL) { checkCudaCall(cudaFree(deviceA)); checkCudaCall(cudaFree(deviceB)); cout << "could not allocate memory!" << endl; return; } timer kernelTime1 = timer("kernelTime1"); timer memoryTime = timer("memoryTime"); // copy the original vectors to the GPU memoryTime.start(); checkCudaCall(cudaMemcpy(deviceA, a, n*sizeof(float), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(deviceB, b, n*sizeof(float), cudaMemcpyHostToDevice)); memoryTime.stop(); // execute kernel kernelTime1.start(); vectorTransformKernel<<<977, 1024>>>(deviceA, deviceB, deviceResult); //vectorTransformKernel<<<977,1024>>>(deviceA, deviceB, deviceResult); vectorTransformKernel<<<977, 1024>>>(deviceA, deviceB, deviceResult); vectorTransformKernel<<<977, 1024>>>(deviceA, deviceB, deviceResult); vectorTransformKernel<<<977, 1024>>>(deviceA, deviceB, deviceResult); vectorTransformKernel<<<977, 1024>>>(deviceA, deviceB, deviceResult); cudaDeviceSynchronize(); kernelTime1.stop(); // check whether the kernel invocation was successful checkCudaCall(cudaGetLastError()); // copy result back memoryTime.start(); checkCudaCall(cudaMemcpy(result, deviceResult, n * sizeof(float), cudaMemcpyDeviceToHost)); checkCudaCall(cudaMemcpy(b, deviceB, n * sizeof(float), cudaMemcpyDeviceToHost)); memoryTime.stop(); checkCudaCall(cudaFree(deviceA)); checkCudaCall(cudaFree(deviceB)); checkCudaCall(cudaFree(deviceResult)); cout << "vector-transform (kernel): \t\t" << kernelTime1 << endl; cout << "vector-transform (memory): \t\t" << memoryTime << endl; } int vectorTransformSeq(int n, float* a, float* b, float* result) { int i,j; timer sequentialTime = timer("Sequential"); sequentialTime.start(); for (j=0; j<5; j++) { for (i=0; i<n; i++) { result[i] = result[i]+a[i]*b[i]; } } sequentialTime.stop(); cout << "vector-transform (sequential): \t\t" << sequentialTime << endl; } int main(int argc, char* argv[]) { int n = 1000000; float* a = new float[n]; float* b = new float[n]; float* result = new float[n]; float* result_s = new float[n]; if (argc > 1) n = atoi(argv[1]); cout << "Iteratively transform vector A with vector B of " << n << " integer elements." << endl; // initialize the vectors. 
for(int i=0; i<n; i++) { a[i] = i; b[i] = 0.1*i; result[i]=0; result_s[i]=0; } vectorTransformSeq(n, a, b, result_s); vectorTransformCuda(n, a, b, result); // verify the resuls for(int i=0; i<n; i++) { //if(result[i] != n /*2*i*/) { //cout<<result[i]<<"\t"<<result_s[i]<<"\n"; if (fabs(result[i]-result_s[i])<0) { cout << "error in results! Element " << i << " is " << result[i] << ", but should be " << result_s[i] << endl; exit(1); } } cout << "results OK!" << endl; delete[] a; delete[] b; delete[] result; return 0; }
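Both versions of this pair hard-code the launch configuration (977 blocks of 1024 threads, with the element count 1,000,000 repeated inside the kernel) while the declared threadBlockSize of 512 goes unused, and the check fabs(result[i]-result_s[i]) < 0 in the verification loop can never be true. A minimal sketch, not part of the record, of deriving the grid from n and passing the bound into the kernel:

    // Illustrative variant: take n as a kernel argument instead of hard-coding 1000000.
    __global__ void vectorTransformKernelN(int n, const float* A, const float* B, float* Result) {
        int i = threadIdx.x + blockDim.x * blockIdx.x;
        if (i < n) Result[i] = Result[i] + A[i] * B[i];
    }

    // Host side (inside vectorTransformCuda, reusing its deviceA/deviceB/deviceResult and n):
    int threadBlockSize = 512;
    int numBlocks = (n + threadBlockSize - 1) / threadBlockSize;  // round up so every element is covered
    vectorTransformKernelN<<<numBlocks, threadBlockSize>>>(n, deviceA, deviceB, deviceResult);

A positive tolerance in the verification loop (for example fabs(result[i]-result_s[i]) > 1e-3f) would also make the check meaningful.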
35cff31b0eb67eb6bb8beff625ee2fb608b31bdf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __constant__ int directions[4][2] = {{ -1, 0 }, { 1, 0 }, { 0, -1 }, { 0, 1 }}; __device__ __host__ bool white(float val) { return val > 127.0f; } __device__ __host__ int clipvalue(int x, int lb, int ub) {return min(ub-1, max(x, lb));} //__device__ __host__ __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; } } } __global__ void CalculateFixed(const float *background, const float *target, const float *mask, float *fixed, int wb, int hb, int wt, int ht, int oy, int ox, int stride = 1) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int syt = stride*yt; const int sxt = stride*xt; const int curt = wt*syt+sxt; if (syt < ht and sxt < wt and white(mask[curt])) { float tmpfixedr = 0.0f; float tmpfixedg = 0.0f; float tmpfixedb = 0.0f; for (int i=0; i<4; ++i) { int neighbor_y = syt + stride*directions[i][0]; int neighbor_x = sxt + stride*directions[i][1]; const int tneighbor = wt*neighbor_y + neighbor_x; if (0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt) { tmpfixedr += target[curt*3 + 0] - target[tneighbor*3 + 0]; tmpfixedg += target[curt*3 + 1] - target[tneighbor*3 + 1]; tmpfixedb += target[curt*3 + 2] - target[tneighbor*3 + 2]; } const int bgneighbor = wb*(clipvalue(oy + stride*(yt + directions[i][0]), 0, hb)) + clipvalue(ox + stride*(xt + directions[i][1]), 0, wb); if (!(0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt) or !white(mask[tneighbor])) { tmpfixedr += background[bgneighbor*3 + 0]; tmpfixedg += background[bgneighbor*3 + 1]; tmpfixedb += background[bgneighbor*3 + 2]; } } fixed[curt*3 + 0] = tmpfixedr; fixed[curt*3 + 1] = tmpfixedg; fixed[curt*3 + 2] = tmpfixedb; } } __global__ void PoissonImageCloningIteration(const float *fixed, const float *mask, const float *buf1, float *buf2, int wt, int ht, int stride = 1, float omega = 1) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int syt = stride*yt; const int sxt = stride*xt; const int curt = wt*syt+sxt; if (syt < ht and sxt < wt and white(mask[curt])) { float newr = fixed[curt*3 + 0]; float newg = fixed[curt*3 + 1]; float newb = fixed[curt*3 + 2]; for (int i=0; i<4; ++i) { int neighbor_y = syt + stride*directions[i][0]; int neighbor_x = sxt + stride*directions[i][1]; const int tneighbor = wt*neighbor_y + neighbor_x; if (0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt and white(mask[tneighbor])) { newr += buf1[tneighbor*3 + 0]; newg += buf1[tneighbor*3 + 1]; newb += buf1[tneighbor*3 + 2]; } } buf2[curt*3 + 0] = newr * 0.25f; buf2[curt*3 + 1] = newg * 0.25f; buf2[curt*3 + 2] = newb * 0.25f; } } __global__ void 
scaleUp( const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox, int stride ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and white(mask[curt])) { int basey = (yt/stride) * stride; int basex = (xt/stride) * stride; int baset = basey*wt + basex; // interpolation? if (0 <= basey and basey < ht and 0 <= basex and basex < wt) { output[curt*3 + 0] = output[baset*3 + 0]; output[curt*3 + 1] = output[baset*3 + 1]; output[curt*3 + 2] = output[baset*3 + 2]; } } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice); //SimpleClone<<<dim3(CeilDiv(wt,32), CeilDiv(ht,16)), dim3(32,16)>>>( // background, target, mask, output, // wb, hb, wt, ht, oy, ox //); // set up float *fixed, *buf1, *buf2; hipMalloc(&fixed, 3*wt*ht*sizeof(float)); hipMalloc(&buf1, 3*wt*ht*sizeof(float)); hipMalloc(&buf2, 3*wt*ht*sizeof(float)); // initialize the iteration dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16); //CalculateFixed<<<gdim, bdim>>>( background, target, mask, fixed, wb, hb, wt, ht, oy, ox); hipMemcpy(buf1, target, sizeof(float)*3*wt*ht, hipMemcpyDeviceToDevice); // iterate //CalculateFixed<<<gdim, bdim>>>( output, target, mask, fixed, wb, hb, wt, ht, oy, ox, 1); //for (int i = 0; i < 10000; ++i) { // hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, fixed, mask, buf1, buf2, wt, ht, 1, 1); // hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, fixed, mask, buf2, buf1, wt, ht, 1, 1); //} // scale up for (int scale=16; scale>0; scale>>=1) { hipLaunchKernelGGL(( CalculateFixed), dim3(gdim), dim3(bdim), 0, 0, output, target, mask, fixed, wb, hb, wt, ht, oy, ox, scale); for (int iter = 0; iter < 100; ++iter) { hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, fixed, mask, buf1, buf2, wt, ht, scale, 1); hipLaunchKernelGGL(( PoissonImageCloningIteration), dim3(gdim), dim3(bdim), 0, 0, fixed, mask, buf2, buf1, wt, ht, scale, 1); } if (scale == 1) break; //SimpleClone<<<gdim, bdim>>>(background, buf1 , mask, output, wb, hb, wt, ht, oy, ox); hipLaunchKernelGGL(( scaleUp), dim3(gdim), dim3(bdim), 0, 0, mask, buf1, wb, hb, wt, ht, oy, ox, scale); } // copy the image back hipMemcpy(output, background, wb*hb*sizeof(float)*3, hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( SimpleClone), dim3(gdim), dim3(bdim), 0, 0, background, buf1 , mask, output, wb, hb, wt, ht, oy, ox); // clean up hipFree(fixed); hipFree(buf1); hipFree(buf2); }
35cff31b0eb67eb6bb8beff625ee2fb608b31bdf.cu
#include "lab3.h" #include <cstdio> __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __constant__ int directions[4][2] = {{ -1, 0 }, { 1, 0 }, { 0, -1 }, { 0, 1 }}; __device__ __host__ bool white(float val) { return val > 127.0f; } __device__ __host__ int clipvalue(int x, int lb, int ub) {return min(ub-1, max(x, lb));} //__device__ __host__ __global__ void SimpleClone( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and mask[curt] > 127.0f) { const int yb = oy+yt, xb = ox+xt; const int curb = wb*yb+xb; if (0 <= yb and yb < hb and 0 <= xb and xb < wb) { output[curb*3+0] = target[curt*3+0]; output[curb*3+1] = target[curt*3+1]; output[curb*3+2] = target[curt*3+2]; } } } __global__ void CalculateFixed(const float *background, const float *target, const float *mask, float *fixed, int wb, int hb, int wt, int ht, int oy, int ox, int stride = 1) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int syt = stride*yt; const int sxt = stride*xt; const int curt = wt*syt+sxt; if (syt < ht and sxt < wt and white(mask[curt])) { float tmpfixedr = 0.0f; float tmpfixedg = 0.0f; float tmpfixedb = 0.0f; for (int i=0; i<4; ++i) { int neighbor_y = syt + stride*directions[i][0]; int neighbor_x = sxt + stride*directions[i][1]; const int tneighbor = wt*neighbor_y + neighbor_x; if (0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt) { tmpfixedr += target[curt*3 + 0] - target[tneighbor*3 + 0]; tmpfixedg += target[curt*3 + 1] - target[tneighbor*3 + 1]; tmpfixedb += target[curt*3 + 2] - target[tneighbor*3 + 2]; } const int bgneighbor = wb*(clipvalue(oy + stride*(yt + directions[i][0]), 0, hb)) + clipvalue(ox + stride*(xt + directions[i][1]), 0, wb); if (!(0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt) or !white(mask[tneighbor])) { tmpfixedr += background[bgneighbor*3 + 0]; tmpfixedg += background[bgneighbor*3 + 1]; tmpfixedb += background[bgneighbor*3 + 2]; } } fixed[curt*3 + 0] = tmpfixedr; fixed[curt*3 + 1] = tmpfixedg; fixed[curt*3 + 2] = tmpfixedb; } } __global__ void PoissonImageCloningIteration(const float *fixed, const float *mask, const float *buf1, float *buf2, int wt, int ht, int stride = 1, float omega = 1) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int syt = stride*yt; const int sxt = stride*xt; const int curt = wt*syt+sxt; if (syt < ht and sxt < wt and white(mask[curt])) { float newr = fixed[curt*3 + 0]; float newg = fixed[curt*3 + 1]; float newb = fixed[curt*3 + 2]; for (int i=0; i<4; ++i) { int neighbor_y = syt + stride*directions[i][0]; int neighbor_x = sxt + stride*directions[i][1]; const int tneighbor = wt*neighbor_y + neighbor_x; if (0 <= neighbor_y and neighbor_y < ht and 0 <= neighbor_x and neighbor_x < wt and white(mask[tneighbor])) { newr += buf1[tneighbor*3 + 0]; newg += buf1[tneighbor*3 + 1]; newb += buf1[tneighbor*3 + 2]; } } buf2[curt*3 + 0] = newr * 0.25f; buf2[curt*3 + 1] = newg * 0.25f; buf2[curt*3 + 2] = newb * 0.25f; } } __global__ void scaleUp( const float *mask, float *output, const int wb, const int hb, const int wt, const 
int ht, const int oy, const int ox, int stride ) { const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt*yt+xt; if (yt < ht and xt < wt and white(mask[curt])) { int basey = (yt/stride) * stride; int basex = (xt/stride) * stride; int baset = basey*wt + basex; // interpolation? if (0 <= basey and basey < ht and 0 <= basex and basex < wt) { output[curt*3 + 0] = output[baset*3 + 0]; output[curt*3 + 1] = output[baset*3 + 1]; output[curt*3 + 2] = output[baset*3 + 2]; } } } void PoissonImageCloning( const float *background, const float *target, const float *mask, float *output, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ) { cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice); //SimpleClone<<<dim3(CeilDiv(wt,32), CeilDiv(ht,16)), dim3(32,16)>>>( // background, target, mask, output, // wb, hb, wt, ht, oy, ox //); // set up float *fixed, *buf1, *buf2; cudaMalloc(&fixed, 3*wt*ht*sizeof(float)); cudaMalloc(&buf1, 3*wt*ht*sizeof(float)); cudaMalloc(&buf2, 3*wt*ht*sizeof(float)); // initialize the iteration dim3 gdim(CeilDiv(wt,32), CeilDiv(ht,16)), bdim(32,16); //CalculateFixed<<<gdim, bdim>>>( background, target, mask, fixed, wb, hb, wt, ht, oy, ox); cudaMemcpy(buf1, target, sizeof(float)*3*wt*ht, cudaMemcpyDeviceToDevice); // iterate //CalculateFixed<<<gdim, bdim>>>( output, target, mask, fixed, wb, hb, wt, ht, oy, ox, 1); //for (int i = 0; i < 10000; ++i) { // PoissonImageCloningIteration<<<gdim, bdim>>>(fixed, mask, buf1, buf2, wt, ht, 1, 1); // PoissonImageCloningIteration<<<gdim, bdim>>>(fixed, mask, buf2, buf1, wt, ht, 1, 1); //} // scale up for (int scale=16; scale>0; scale>>=1) { CalculateFixed<<<gdim, bdim>>>( output, target, mask, fixed, wb, hb, wt, ht, oy, ox, scale); for (int iter = 0; iter < 100; ++iter) { PoissonImageCloningIteration<<<gdim, bdim>>>(fixed, mask, buf1, buf2, wt, ht, scale, 1); PoissonImageCloningIteration<<<gdim, bdim>>>(fixed, mask, buf2, buf1, wt, ht, scale, 1); } if (scale == 1) break; //SimpleClone<<<gdim, bdim>>>(background, buf1 , mask, output, wb, hb, wt, ht, oy, ox); scaleUp<<<gdim, bdim>>>(mask, buf1, wb, hb, wt, ht, oy, ox, scale); } // copy the image back cudaMemcpy(output, background, wb*hb*sizeof(float)*3, cudaMemcpyDeviceToDevice); SimpleClone<<<gdim, bdim>>>(background, buf1 , mask, output, wb, hb, wt, ht, oy, ox); // clean up cudaFree(fixed); cudaFree(buf1); cudaFree(buf2); }
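The PoissonImageCloningIteration kernel in this pair is a Jacobi sweep for the Poisson blending system (the omega parameter is accepted but unused, so there is no over-relaxation). For a masked pixel p with 4-neighbourhood N(p), each colour channel is updated as

    u_new[p] = 1/4 * ( fixed[p] + Σ_{q ∈ N(p), q inside the target and masked} u_old[q] )

where CalculateFixed precomputes

    fixed[p] = Σ_{q ∈ N(p), q inside the target} (target[p] - target[q]) + Σ_{q ∈ N(p), q outside the target or unmasked} background[q']

with q' the corresponding (clipped) background pixel, so the Dirichlet boundary values come from the background image. The outer loop runs this at strides 16, 8, 4, 2, 1, with scaleUp propagating each coarse-stride solution in buf1 onto the finer grid before the next round.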
f6f2927b948517011570b715bd3dfea88f86b5f3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <c10/macros/Macros.h> #include <hiprand/hiprand_kernel.h> #include <THH/THHGeneral.h> #include <THH/THHTensorRandom.h> #include <THH/THHGenerator.hpp> THCGenerator* THCRandom_getGenerator(THCState* state); namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! const int UNROLL = 4; std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) { auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState()); uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment); return std::make_pair(gen_->state.initial_seed, offset); } template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims> #if __CUDA_ARCH__ >= 350 C10_LAUNCH_BOUNDS_2(256, 8) #elif defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<uint8_t, IndexType> c, IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds ) { accscalar_t pinv = accscalar_t(1)/p; IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; hiprandStatePhilox4_32_10_t state; hiprand_init( seeds.first, idx, seeds.second, &state); IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //hiprand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = hiprand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `a` const IndexType aOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] = a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv; c.data[bOffset] = (uint8_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){ at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){ ret_val = (float)mask_val * src_val * scale; }); } } //anonymous namespace std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen){ Tensor ret = at::empty_like(self); Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte)); const int64_t nelem = self.numel(); const int64_t block_size = 256; unsigned int 
blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = ::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; if (cuda::detail::canUse32BitIndexMath(self)){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); break; default: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); } }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); break; default: hipLaunchKernelGGL(( fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1>), dim3(grid), dim3(dim_block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); } }); } THCudaCheck(hipGetLastError()); return std::tuple<Tensor,Tensor>(ret, mask); } Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ Tensor ret = at::empty_like(self); TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(scale); masked_scale_kernel<scalar_t>(ret, self, mask, pa); }); return ret; } } }
f6f2927b948517011570b715bd3dfea88f86b5f3.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <c10/macros/Macros.h> #include <curand_kernel.h> #include <THC/THCGeneral.h> #include <THC/THCTensorRandom.h> #include <THC/THCGenerator.hpp> THCGenerator* THCRandom_getGenerator(THCState* state); namespace at{ namespace native{ namespace { // philox generates 128 bits of randomness at a time. Kernel uses this explicitly by putting suitably transformed result into float4 // for all members of float4 to be consumed UNROLL has to be 4. Don't change! const int UNROLL = 4; std::pair<uint64_t, uint64_t> next_philox_seed(at::Generator* gen, uint64_t increment) { auto gen_ = THCRandom_getGenerator(at::globalContext().getTHCState()); uint64_t offset = gen_->state.philox_seed_offset.fetch_add(increment); return std::make_pair(gen_->state.initial_seed, offset); } template < typename scalar_t, typename accscalar_t, typename IndexType, int ADims> #if __CUDA_ARCH__ >= 350 C10_LAUNCH_BOUNDS_2(256, 8) #elif defined (__HIP_PLATFORM_HCC__) C10_LAUNCH_BOUNDS_2(256, 4) #endif __global__ void fused_dropout_kernel(cuda::detail::TensorInfo<scalar_t, IndexType> a, cuda::detail::TensorInfo<scalar_t, IndexType> b, cuda::detail::TensorInfo<uint8_t, IndexType> c, IndexType totalElements, accscalar_t p, std::pair<uint64_t, uint64_t> seeds ) { accscalar_t pinv = accscalar_t(1)/p; IndexType idx = blockIdx.x * blockDim.x + threadIdx.x; curandStatePhilox4_32_10_t state; curand_init( seeds.first, idx, seeds.second, &state); IndexType rounded_size = ((totalElements - 1)/(blockDim.x * gridDim.x * UNROLL)+1) * blockDim.x * gridDim.x * UNROLL; for (IndexType linearIndex = idx; linearIndex < rounded_size; linearIndex += gridDim.x * blockDim.x*UNROLL) { //curand_uniform_double was pure evil anyway, not doing what it promises, and there's nothing for halfs, so generate float for everything float4 rand = curand_uniform4(&state); scalar_t src[UNROLL]; rand.x = rand.x < p; rand.y = rand.y < p; rand.z = rand.z < p; rand.w = rand.w < p; for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `a` const IndexType aOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, ADims>::get(li, a); src[ii] = a.data[aOffset]; } } for (int ii = 0; ii < UNROLL; ii++) { IndexType li = linearIndex + blockDim.x * gridDim.x * ii; if (li < totalElements) { // Convert `linearIndex` into an offset of `b` const IndexType bOffset = cuda::detail::IndexToOffset<scalar_t, IndexType, 1>::get(li, b); b.data[bOffset] = src[ii]*(&rand.x)[ii]*pinv; c.data[bOffset] = (uint8_t)(&rand.x)[ii]; } } __syncthreads(); } } template<typename scalar_t, typename accscalar_t> void masked_scale_kernel(at::Tensor& ret, const at::Tensor src, const at::Tensor mask, accscalar_t scale){ at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, uint8_t>(ret, src, mask, [scale]__device__(scalar_t& ret_val, const scalar_t& src_val, const uint8_t mask_val){ ret_val = (float)mask_val * src_val * scale; }); } } //anonymous namespace std::tuple<Tensor,Tensor> fused_dropout_cuda(const Tensor& self, double p, Generator * gen){ Tensor ret = at::empty_like(self); Tensor mask = at::empty(self.sizes(), self.options().dtype(kByte)); const int64_t nelem = self.numel(); const int64_t block_size = 256; unsigned int blocks_per_sm = at::cuda::getCurrentDeviceProperties()->maxThreadsPerMultiProcessor/block_size; 
dim3 dim_block(block_size); dim3 grid((nelem + block_size -1)/block_size); grid.x = std::min((unsigned int)at::cuda::getCurrentDeviceProperties()->multiProcessorCount * blocks_per_sm, grid.x); //number of times random will be generated per thread, to offset philox counter in thc random state int64_t counter_offset = ((nelem - 1)/(block_size*grid.x*UNROLL)+1)*UNROLL; if (cuda::detail::canUse32BitIndexMath(self)){ AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, unsigned int>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, unsigned int>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); break; default: fused_dropout_kernel<scalar_t, accscalar_t, unsigned int, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); } }); } else { AT_DISPATCH_FLOATING_TYPES_AND_HALF(self.scalar_type(), "fused_dropout", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(p); auto self_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(self); auto ret_info = cuda::detail::getTensorInfo<scalar_t, uint64_t>(ret); auto mask_info = cuda::detail::getTensorInfo<uint8_t, uint64_t>(mask); self_info.collapseDims(); ret_info.collapseDims(); mask_info.collapseDims(); //ret and mask are collapsed to 1d contiguous tensor switch (self_info.dims) { case 1: fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, 1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); break; default: fused_dropout_kernel<scalar_t, accscalar_t, uint64_t, -1><<<grid, dim_block, 0, at::cuda::getCurrentCUDAStream()>>>(self_info, ret_info, mask_info, nelem, pa, next_philox_seed(gen,counter_offset)); } }); } THCudaCheck(cudaGetLastError()); return std::tuple<Tensor,Tensor>(ret, mask); } Tensor masked_scale_cuda(const Tensor& self, const Tensor& mask, double scale){ Tensor ret = at::empty_like(self); TORCH_CHECK(mask.scalar_type() == at::ScalarType::Byte, "mask should be torch.uint8 dtype"); AT_DISPATCH_FLOATING_TYPES_AND_HALF(ret.scalar_type(), "masked_scale", [&] { using accscalar_t = acc_type<scalar_t, true>; accscalar_t pa = (accscalar_t)(scale); masked_scale_kernel<scalar_t>(ret, self, mask, pa); }); return ret; } } }
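In this pair, fused_dropout_kernel treats p as the keep probability: each element draws u ~ Uniform(0,1) from the Philox generator, keeps the value when u < p, and rescales by 1/p, returning the uint8 keep mask alongside the output:

    m_i = 1[u_i < p],   b_i = a_i * m_i / p

masked_scale_cuda then applies the elementwise product ret = mask * self * scale.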
a1263940af2904cc2abcff37d2425ea59a38c946.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> #include <algorithm> #include <omp.h> #include "utils.h" #include <hip/hip_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> // CPU namespace v0 { /** * @brief CPU * * @param k * @param m * @param n * @param s_points * @param r_points * @param results */ extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { int *tmp = (int *)malloc(sizeof(int) * m); for (int i = 0; i < m; ++i) { float minSum = INFINITY; int index = 0; for (int j = 0; j < n; ++j) { float tempSum = 0; for (int t = 0; t < k; ++t) { const float diff = s_points[i * k + t] - r_points[j * k + t]; tempSum += diff * diff; // } if (minSum > tempSum) { // minSum = tempSum; index = j; } } tmp[i] = index; } *results = tmp; } } // GPU: m*n namespace v1 { extern __global__ void get_dis_kernel( const int k, // const int m, // const int n, // const float *__restrict__ s_points, // const float *__restrict__ r_points, // float *__restrict__ dis) // { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idm = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idm < m) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } dis[idn + idm * n] = tempSum; // m*n } } /** * @brief * * @param m * @param n * @param dis * @param result */ template <int BLOCK_DIM> static __global__ void get_min_kernel( // const int m, const int n, const float *__restrict__ dis, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { const float tempSum = dis[idn + blockIdx.y * n]; if (dis_s[threadIdx.x] > tempSum) { // dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { // if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { float *dis_d, *s_d, *r_d; int *results_d; CHECK(hipMalloc((void **)&dis_d, m * n * sizeof(float))); CHECK(hipMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(hipMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&results_d, m * sizeof(int))); CHECK(hipMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; // blockSize hipLaunchKernelGGL(( get_dis_kernel), dim3( dim3(divup(n, BLOCK_DIM_X), divup(m, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, m, n, s_d, r_d, dis_d); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; // blockSize hipLaunchKernelGGL(( get_min_kernel<BLOCK_DIM>), dim3(m), dim3(BLOCK_DIM), 0, 0, m, n, dis_d, results_d); // CHECK(hipMemcpy((void **)*results, (void *)results_d, m * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(dis_d)); CHECK(hipFree(s_d)); CHECK(hipFree(r_d)); 
CHECK(hipFree(results_d)); } }; // GPU: thrust namespace v2 { extern __global__ void get_dis_kernel( const int k, // const int m, // const int n, // const float *__restrict__ s_points, // const float *__restrict__ r_points, // float *__restrict__ dis) // { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idm = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idm < m) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } dis[idn + idm * n] = tempSum; // m*n } } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { thrust::device_vector<float> dis_d(m * n); thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(r_points, r_points + k * n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; // blockSize hipLaunchKernelGGL(( get_dis_kernel), dim3( dim3(divup(n, BLOCK_DIM_X), divup(m, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, m, n, thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(dis_d.data())); *results = (int *)malloc(sizeof(int) * m); for (int i = 0; i < m; ++i) // (*results)[i] = thrust::min_element(dis_d.begin() + n * i, dis_d.begin() + n * i + n) - dis_d.begin() - n * i; } }; // GPU namespace v3 { /** * @brief cuda * * @tparam BLOCK_DIM * @param k * @param m * @param n * @param s_points * @param r_points * @param result */ template <int BLOCK_DIM> static __global__ void cudaCallKernel( const int k, const int m, const int n, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { // const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { // if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { float *s_d, *r_d; int *results_d; CHECK(hipMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(hipMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&results_d, m * sizeof(int))); CHECK(hipMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), hipMemcpyHostToDevice)); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; // blockSize hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(m), dim3(BLOCK_DIM), 0, 0, k, m, n, s_d, r_d, results_d); // CHECK(hipMemcpy((void **)*results, (void *)results_d, m * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(s_d)); CHECK(hipFree(r_d)); CHECK(hipFree(results_d)); } }; // GPUAoS2SoA namespace v4 { /** * @brief * * @param k * @param n * @param input 
* @param output */ static __global__ void mat_inv_kernel( const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { float *s_d, *r_d, *rr_d; int *results_d; CHECK(hipMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(hipMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&rr_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&results_d, m * sizeof(int))); CHECK(hipMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), hipMemcpyHostToDevice)); CHECK(hipMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), hipMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; hipLaunchKernelGGL(( mat_inv_kernel), dim3( dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, n, r_d, rr_d); const int BLOCK_DIM = 1024; hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(m), dim3(BLOCK_DIM), 0, 0, k, m, n, s_d, rr_d, results_d); // CHECK(hipMemcpy((void **)*results, (void *)results_d, m * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(s_d)); CHECK(hipFree(r_d)); CHECK(hipFree(rr_d)); CHECK(hipFree(results_d)); } }; // GPU namespace v5 { template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const float *__restrict__ s_points, hipTextureObject_t texObj, // int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - tex2D<float>(texObj, idk, idn); tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } 
__syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { if (n > 65536) { // v4::cudaCall(k, m, n, s_points, r_points, results); return; } hipArray *cuArray; hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); CHECK(hipMallocArray(&cuArray, &channelDesc, k, n)); CHECK(hipMemcpy2DToArray(cuArray, 0, 0, r_points, sizeof(float) * k, sizeof(float) * k, n, hipMemcpyHostToDevice)); // cudaArray struct hipResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = hipResourceTypeArray; resDesc.res.array.array = cuArray; // struct hipTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = hipReadModeElementType; // hipTextureObject_t texObj = 0; CHECK(hipCreateTextureObject(&texObj, &resDesc, &texDesc, NULL)); float *s_d; int *results_d; CHECK(hipMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(hipMalloc((void **)&results_d, m * sizeof(int))); CHECK(hipMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), hipMemcpyHostToDevice)); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(m), dim3(BLOCK_DIM), 0, 0, k, m, n, s_d, texObj, results_d); // // CHECK(hipDestroyTextureObject(texObj)); CHECK(hipMemcpy((void **)*results, (void *)results_d, m * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(s_d)); CHECK(hipFree(results_d)); } }; // GPU namespace v6 { static __constant__ float const_mem[(64 << 10) / sizeof(float)]; // static __global__ void mat_inv_kernel( // const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = const_mem[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { if (k * m > (64 << 10) / sizeof(float)) { v4::cudaCall(k, m, n, s_points, r_points, results); return; } CHECK(hipMemcpyToSymbol(const_mem, s_points, sizeof(float) * k * m)); // float *r_d, *rr_d; int *results_d; CHECK(hipMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&rr_d, k * n * sizeof(float))); CHECK(hipMalloc((void **)&results_d, m * sizeof(int))); CHECK(hipMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), hipMemcpyHostToDevice)); const 
int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; hipLaunchKernelGGL(( mat_inv_kernel), dim3( dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, n, r_d, rr_d); const int BLOCK_DIM = 1024; hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(m), dim3(BLOCK_DIM), 0, 0, k, m, n, rr_d, results_d); // CHECK(hipMemcpy((void **)*results, (void *)results_d, m * sizeof(int), hipMemcpyDeviceToHost)); CHECK(hipFree(r_d)); CHECK(hipFree(rr_d)); CHECK(hipFree(results_d)); } }; // GPUblock namespace v7 { static __global__ void mat_inv_kernel( // const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(r_points, r_points + k * n); thrust::device_vector<float> rr_d(k * n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; hipLaunchKernelGGL(( mat_inv_kernel), dim3( dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, n, thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(rr_d.data())); const int BLOCK_DIM = 1024; int numBlocks; CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( // block &numBlocks, cudaCallKernel<BLOCK_DIM>, BLOCK_DIM, 0)); thrust::device_vector<int> results_d(m * divup(numBlocks, m)); hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(dim3(results_d.size() / m, m)), dim3(BLOCK_DIM), 0, 0, k, m, n, results_d.size(), thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(rr_d.data()), thrust::raw_pointer_cast(results_d.data())); *results = (int *)malloc(sizeof(int) * m); if (results_d.size() == m) { thrust::copy(results_d.begin(), results_d.end(), *results); return; } thrust::host_vector<int> results_tmp(results_d); for (int idm = 0; idm < m; ++idm) { // CPU float minSum = INFINITY; int index = 0; for (int i = 0; i < results_tmp.size(); i += m) { const int idn = results_tmp[i]; float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[k * idm + idk] - r_points[k * idn + idk]; tempSum += diff * diff; } if (minSum > tempSum) 
{ minSum = tempSum; index = idn; } } (*results)[idm] = index; } } }; // GPU: namespace v8 { static __global__ void mat_inv_kernel( // const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { thrust::host_vector<int> results_tmp; int num_gpus = 0; CHECK(hipGetDeviceCount(&num_gpus)); // if (num_gpus > n) num_gpus = n; if (num_gpus < 1) return v0::cudaCall(k, m, n, s_points, r_points, results); if (n <= thrust::min(1 << 18, m << 10)) return v7::cudaCall(k, m, n, s_points, r_points, results); #pragma omp parallel num_threads(num_gpus) // { int thread_num = omp_get_thread_num(); int thread_n = divup(n, num_gpus); float *thread_r_points = r_points + thread_num * thread_n * k; // if (thread_num == num_gpus - 1) { thread_n = n - thread_num * thread_n; if (thread_n == 0) { thread_n = 1; thread_r_points -= k; } } CHECK(hipSetDevice(thread_num)); // thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(thread_r_points, thread_r_points + k * thread_n); thrust::device_vector<float> rr_d(k * thread_n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; hipLaunchKernelGGL(( mat_inv_kernel), dim3( dim3(divup(thread_n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, thread_n, thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(rr_d.data())); const int BLOCK_DIM = 1024; int numBlocks; CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cudaCallKernel<BLOCK_DIM>, BLOCK_DIM, 0)); thrust::device_vector<int> results_d(m * divup(numBlocks, m)); hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(dim3(results_d.size() / m, m)), dim3(BLOCK_DIM), 0, 0, k, m, thread_n, results_d.size(), thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(rr_d.data()), thrust::raw_pointer_cast(results_d.data())); int my_beg, my_end; #pragma omp critical // { my_beg = results_tmp.size(); results_tmp.insert(results_tmp.end(), results_d.begin(), results_d.end()); my_end = results_tmp.size(); } #pragma omp barrier // for (int offset = (thread_r_points - r_points) / k; my_beg < my_end; 
++my_beg) results_tmp[my_beg] += offset; // indexindex } *results = (int *)malloc(sizeof(int) * m); for (int idm = 0; idm < m; ++idm) { // CPU float minSum = INFINITY; int index = 0; for (int i = 0; i < results_tmp.size(); i += m) { const int idn = results_tmp[i]; float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[k * idm + idk] - r_points[k * idn + idk]; tempSum += diff * diff; } if (minSum > tempSum) { minSum = tempSum; index = idn; } } (*results)[idm] = index; } } }; // GPU+ namespace v9 { static __global__ void mat_inv_kernel( // const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); if (threadIdx.x < 512) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 512]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 512]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 512]; } __syncthreads(); if (threadIdx.x < 256) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 256]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 256]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 256]; } __syncthreads(); if (threadIdx.x < 128) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 128]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 128]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 128]; } __syncthreads(); if (threadIdx.x < 64) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 64]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 64]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 64]; } __syncthreads(); if (threadIdx.x < 32) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 32]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 32]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 32]; } if (threadIdx.x < 16) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 16]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 16]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 16]; } if (threadIdx.x < 8) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 8]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 8]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 8]; } if (threadIdx.x < 4) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 4]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 4]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 4]; } if (threadIdx.x < 2) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 2]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 2]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 2]; } if (threadIdx.x == 0) result[id] = dis_s[0] > dis_s[1] ? 
ind_s[1] : ind_s[0]; } extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { thrust::host_vector<int> results_tmp; int num_gpus = 0; CHECK(hipGetDeviceCount(&num_gpus)); // if (num_gpus > n) num_gpus = n; if (num_gpus < 1) return v0::cudaCall(k, m, n, s_points, r_points, results); if (n <= thrust::min(1 << 18, m << 10)) return v7::cudaCall(k, m, n, s_points, r_points, results); #pragma omp parallel num_threads(num_gpus) // { int thread_num = omp_get_thread_num(); int thread_n = divup(n, num_gpus); float *thread_r_points = r_points + thread_num * thread_n * k; // if (thread_num == num_gpus - 1) { thread_n = n - thread_num * thread_n; if (thread_n == 0) { thread_n = 1; thread_r_points -= k; } } CHECK(hipSetDevice(thread_num)); // thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(thread_r_points, thread_r_points + k * thread_n); thrust::device_vector<float> rr_d(k * thread_n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; hipLaunchKernelGGL(( mat_inv_kernel), dim3( dim3(divup(thread_n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y))), dim3( dim3(BLOCK_DIM_X, BLOCK_DIM_Y)), 0, 0, k, thread_n, thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(rr_d.data())); const int BLOCK_DIM = 1024; int numBlocks; CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cudaCallKernel<BLOCK_DIM>, BLOCK_DIM, 0)); thrust::device_vector<int> results_d(m * divup(numBlocks, m)); hipLaunchKernelGGL(( cudaCallKernel<BLOCK_DIM>), dim3(dim3(results_d.size() / m, m)), dim3(BLOCK_DIM), 0, 0, k, m, thread_n, results_d.size(), thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(rr_d.data()), thrust::raw_pointer_cast(results_d.data())); int my_beg, my_end; #pragma omp critical // { my_beg = results_tmp.size(); results_tmp.insert(results_tmp.end(), results_d.begin(), results_d.end()); my_end = results_tmp.size(); } #pragma omp barrier // for (int offset = (thread_r_points - r_points) / k; my_beg < my_end; ++my_beg) results_tmp[my_beg] += offset; // indexindex } *results = (int *)malloc(sizeof(int) * m); for (int idm = 0; idm < m; ++idm) { // CPU float minSum = INFINITY; int index = 0; for (int i = 0; i < results_tmp.size(); i += m) { const int idn = results_tmp[i]; float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[k * idm + idk] - r_points[k * idn + idk]; tempSum += diff * diff; } if (minSum > tempSum) { minSum = tempSum; index = idn; } } (*results)[idm] = index; } } }; // CPU :KDTree namespace v10 { float *s_points, *r_points; int k; struct DimCmp { int dim; bool operator()(int lhs, int rhs) const { return r_points[lhs * k + dim] < r_points[rhs * k + dim]; } }; struct KDTreeCPU { thrust::host_vector<int> p, dim; /** * @brief Construct a new KDTreeCPU object * * @param n */ KDTreeCPU(int n) : p(n << 2, -1), dim(p) { thrust::host_vector<int> se(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n)); build(se.begin(), se.end()); } /** * @brief kd-tree * * @param beg * @param end * @param rt */ void build(thrust::host_vector<int>::iterator beg, thrust::host_vector<int>::iterator end, int rt = 1) { if (beg >= end) return; float sa_max = -INFINITY; for (int idk = 0; idk < k; ++idk) { float sum = 0, sa = 0; for (thrust::host_vector<int>::iterator it = beg; it != end; ++it) { float val = r_points[(*it) * k + idk]; sum += val, sa += val * val; } sa = (sa - sum * sum / (end - beg)) / (end - beg); if (sa_max < sa) sa_max = sa, dim[rt] = idk; } 
thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2; std::nth_element(beg, mid, end, DimCmp{dim[rt]}); p[rt] = *mid; build(beg, mid, rt << 1); build(++mid, end, rt << 1 | 1); } /** * @brief * * @param x * @param ans * @param rt * @return thrust::pair<float, int> */ thrust::pair<float, int> ask(int x, thrust::pair<float, int> ans = {INFINITY, 0}, int rt = 1) { if (dim[rt] < 0) return ans; float d = s_points[x * k + dim[rt]] - r_points[p[rt] * k + dim[rt]], tmp = 0; for (int idk = 0; idk < k; ++idk) { float diff = s_points[x * k + idk] - r_points[p[rt] * k + idk]; tmp += diff * diff; } int w = d > 0; ans = ask(x, min(ans, {tmp, p[rt]}), (rt << 1) ^ w); if (ans.first > d * d - 1e-6) ans = ask(x, ans, (rt << 1) ^ w ^ 1); return ans; } }; extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results) // { if (k > 16) return v0::cudaCall(k, m, n, s_points, r_points, results); v10::k = k; v10::s_points = s_points; v10::r_points = r_points; long sta, end; sta = getTime(); KDTreeCPU kd(n); end = getTime(); *results = (int *)malloc(sizeof(int) * m); printf("---search on KD-Tree: --- "); printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6); for (int i = 0; i < m; ++i) (*results)[i] = kd.ask(i).second; } } // GPU :KDTree // namespace v10 // { // __device__ thrust::pair<float, int> ask_device( // float *s_d, // float *r_d, // int *dim, // int *p, // int k, // int x, // thrust::pair<float, int> ans = {INFINITY, 0}, // int rt = 1) // { // int dimrt = dim[rt]; // if (dimrt < 0) // return ans; // int prt = p[rt];` // if (prt < 0) // return ans; // float d = s_d[x * k + dimrt] - r_d[prt * k + dimrt], tmp = 0; // for (int idk = 0; idk < k; ++idk) { // float diff = s_d[x * k + idk] - r_d[prt * k + idk]; // tmp += diff * diff; // } // int w = d > 0; // ans = ask_device(s_d, r_d, dim, p, k, x, thrust::min(ans, {tmp, prt}), (rt << 1) ^ w); // if (ans.first > d * d - 1e-6) // ans = ask_device(s_d, r_d, dim, p, k, x, ans, (rt << 1) ^ w ^ 1); // return ans; // } // __global__ void range_ask_kernel( // float *s_d, // float *r_d, // int *dim, // int *p, // int k, // int m, // int *results) // { // int global_id = blockIdx.x * blockDim.x + threadIdx.x; // if (global_id >= m) // return; // results[global_id] = ask_device(s_d, r_d, dim, p, k, global_id).second; // } // float *s_points, *r_points; // int k; // struct DimCmp { // int dim; // bool operator()(int lhs, int rhs) const { // return r_points[lhs * k + dim] < r_points[rhs * k + dim]; // } // }; // struct KDTreeGPU { // thrust::host_vector<int> p, dim; // thrust::device_vector<int> p_d, dim_d; // thrust::device_vector<float> s_d, r_d; // KDTreeGPU(int n, int m) : // p(n << 2, -1), dim(p), // s_d(s_points, s_points + k * m), // r_d(r_points, r_points + k * n) // { // thrust::host_vector<int> se(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n)); // build(se.begin(), se.end()); // dim_d = dim, p_d = p; // } // void build( // thrust::host_vector<int>::iterator beg, // thrust::host_vector<int>::iterator end, // int rt = 1) // { // if (beg >= end) // return; // float sa_max = -INFINITY; // for (int idk = 0; idk < k; ++idk) { // float sum = 0, sa = 0; // for (thrust::host_vector<int>::iterator it = beg; it != end; ++it) { // float val = r_points[(*it) * k + idk]; // sum += val, sa += val * val; // } // sa = (sa - sum * sum / (end - beg)) / (end - beg); // if (sa_max < sa) // sa_max = sa, dim[rt] = idk; // } // thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2; // 
std::nth_element(beg, mid, end, DimCmp{dim[rt]}); // p[rt] = *mid; // build(beg, mid, rt << 1); // build(++mid, end, rt << 1 | 1); // } // void range_ask(int m, int *results) { // thrust::device_vector<int> results_d(m); // int minGridSize, blockSize; // CHECK(hipOccupancyMaxPotentialBlockSize( // &minGridSize, // &blockSize, // range_ask_kernel)); // range_ask_kernel<<<divup(m, blockSize), blockSize>>> ( // thrust::raw_pointer_cast(s_d.data()), // thrust::raw_pointer_cast(r_d.data()), // thrust::raw_pointer_cast(dim_d.data()), // thrust::raw_pointer_cast(p_d.data()), // k, m, // thrust::raw_pointer_cast(results_d.data())); // thrust::copy(results_d.begin(), results_d.end(), results); // } // }; // extern void cudaCall( // int k, // // int m, // // int n, // // float *s_points, // // float *r_points, // // int **results) // // { // if (k > 16) // return v0::cudaCall(k, m, n, s_points, r_points, results); // v10::k = k; // v10::s_points = s_points; // v10::r_points = r_points; // KDTreeGPU kd(n, m); // *results = (int *)malloc(sizeof(int) * m); // kd.range_ask(m, *results); // } // } // GPU KD-Tree namespace v11 { /** * @brief GPU * * @param s_d * @param r_d * @param dim * @param p * @param k * @param x * @param ans * @param rt * @return thrust::pair<float, int> */ __device__ thrust::pair<float, int> ask_device( float *s_d, float *r_d, int *dim, int *p, int k, int x, thrust::pair<float, int> ans = {INFINITY, 0}, int rt = 1) { int dimrt = dim[rt]; if (dimrt < 0) return ans; int prt = p[rt]; if (prt < 0) return ans; float d = s_d[x * k + dimrt] - r_d[prt * k + dimrt], tmp = 0; for (int kInd = 0; kInd < k; ++kInd) { float diff = s_d[x * k + kInd] - r_d[prt * k + kInd]; tmp += diff * diff; } int w = d > 0; ans = ask_device(s_d, r_d, dim, p, k, x, thrust::min(ans, {tmp, prt}), (rt << 1) ^ w); if (ans.first > d * d - 1e-6) ans = ask_device(s_d, r_d, dim, p, k, x, ans, (rt << 1) ^ w ^ 1); return ans; } __global__ void range_ask_kernel( float *s_d, float *r_d, int *dim, int *p, int k, int m, int *results) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; if (global_id >= m) return; // results[global_id] = ask_device(s_d, r_d, dim, p, k, global_id).second; } float *s_points, *r_points; int k; struct DimCmp { int dim; bool operator()(int lhs, int rhs) const { return r_points[lhs * k + dim] < r_points[rhs * k + dim]; } }; struct KDTreeGPU { thrust::host_vector<int> p, dim; thrust::device_vector<int> p_d, dim_d; thrust::device_vector<float> s_d, r_d; KDTreeGPU(int n, int m) : p(n << 2, -1), dim(p), s_d(s_points, s_points + k * m), r_d(r_points, r_points + k * n) { thrust::host_vector<int> se( thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n)); build(se.begin(), se.end()); dim_d = dim, p_d = p; } void build( thrust::host_vector<int>::iterator beg, thrust::host_vector<int>::iterator end, int rt = 1) { if (beg >= end) return; float sa_max = -INFINITY; for (int kInd = 0; kInd < k; ++kInd) { float sum = 0, sa = 0; for (thrust::host_vector<int>::iterator it = beg; it != end; ++it) { float val = r_points[(*it) * k + kInd]; sum += val, sa += val * val; } sa = (sa - sum * sum / (end - beg)) / (end - beg); if (sa_max < sa) sa_max = sa, dim[rt] = kInd; } thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2; std::nth_element(beg, mid, end, DimCmp{dim[rt]}); p[rt] = *mid; build(beg, mid, rt << 1); build(++mid, end, rt << 1 | 1); } /** * @brief m * * @param m * @param results */ void range_ask(int m, int *results) { thrust::device_vector<int> results_d(m); int minGridSize, 
blockSize; CHECK(hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, range_ask_kernel)); hipLaunchKernelGGL(( range_ask_kernel), dim3( divup(m, blockSize)), dim3( blockSize), 0, 0, thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(dim_d.data()), thrust::raw_pointer_cast(p_d.data()), k, m, thrust::raw_pointer_cast(results_d.data())); thrust::copy(results_d.begin(), results_d.end(), results); } }; static void cudaCall( int k, int m, int n, float *s_points, float *r_points, int **results) { if (k > 16) return v0::cudaCall(k, m, n, s_points, r_points, results); v11::k = k; v11::s_points = s_points; v11::r_points = r_points; long sta, end; sta = getTime(); KDTreeGPU kd(n, m); end = getTime(); *results = (int *)malloc(sizeof(int) * m); printf("---search on KD-Tree: --- "); printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6); // wwz timer; kd.range_ask(m, *results); } } // namespace v10 // CPU :ocTree namespace v12 { int k; float *r_points, *s_points; struct Node { std::vector<int> incl; // float x_c, y_c, z_c; // float radius; // int pos; // 0 1 x2 y3xy4z-1 std::vector<Node> subtree; // int depth; // Node(float x, float y, float z, float r, int position = -1) { x_c = x; y_c = y; z_c = z; radius = r; pos = position; } Node &operator=(const Node &o) { x_c = o.x_c; y_c = o.y_c; z_c = o.z_c; pos = o.pos; incl = o.incl; subtree = o.subtree; return *this; } Node() { depth = 0; } Node(int dep) { depth = dep; pos = -1; } /** * @brief * * @param x x * @param y y * @param z z * @param r */ void setC(float x, float y, float z, float r) { x_c = x; y_c = y; z_c = z; radius = r; } }; struct ocTree { Node treeRoot; ocTree(int n) { std::vector<int> se(n); for (int i = 0; i < n; i++) se[i] = i; treeRoot = build(se.begin(), se.end(), 0); treeRoot.pos = 0; } /** * @brief * * @param beg * @param end * @param depth * @return Node */ Node build(std::vector<int>::iterator beg, std::vector<int>::iterator end, int depth) { if (beg >= end) return Node(depth); float x_min = INFINITY, x_max = -x_min, y_min = INFINITY, y_max = -y_min, z_min = INFINITY, z_max = -z_min; // for (std::vector<int>::iterator i = beg; i != end; i++) { float *point = &r_points[(*i)]; x_min = ::min(x_min, point[0]); x_max = ::max(x_max, point[0]); y_min = ::min(y_min, point[1]); y_max = ::max(y_max, point[1]); z_min = ::min(z_min, point[2]); z_max = ::max(z_max, point[2]); } float r = ::max((x_max - x_min) / 2, ::max((y_max - y_min) / 2, (z_max - z_min) / 2)); Node root(depth); root.setC((x_min + x_max) / 2, (y_max + y_min) / 2, (z_max + z_min) / 2, r); root.subtree.resize(8, Node(root.depth + 1)); // 8 for (std::vector<int>::iterator i = beg; i != end; i++) { float *point = &r_points[(*i)]; int pos = (point[0] > root.x_c) ? 0 : 1; pos |= (point[1] > root.y_c) ? 0 : 2; pos |= (point[2] > root.z_c) ? 
0 : 4; root.subtree[pos].incl.push_back((*i)); } root.incl.clear(); for (int i = 0; i < 8; i++) { if (root.subtree[i].depth > 9 || root.subtree[i].incl.size() <= 1) { root.subtree[i].pos = -1; // } else { root.subtree[i] = build(root.subtree[i].incl.begin(), root.subtree[i].incl.end(), depth + 1); root.subtree[i].incl.clear(); root.subtree[i].pos = i; } } return root; } /** * @brief * * @param root * @param s_point * @param ans * @return std::pair<float, int> */ std::pair<float, int> ask(Node &root, float *s_point, std::pair<float, int> ans = {INFINITY, 0}) { if (root.pos == -1 && root.incl.size() == 0) return ans; // std::pair<float, int> localAns = ans; if (root.incl.size() == 0) { // std::pair<float, int> tmp(INFINITY, 0); int pos = (s_point[0] > root.x_c) ? 0 : 1; pos |= (s_point[1] > root.y_c) ? 0 : 2; pos |= (s_point[2] > root.z_c) ? 0 : 4; tmp = ask(root.subtree[pos], s_point, localAns); // localAns = tmp.first > localAns.first ? localAns : tmp; // Node *rt = &(root.subtree[pos ^ 4]); if (localAns.first > ::min(std::abs(s_point[2] - rt->z_c - rt->radius), std::abs(s_point[2] - rt->z_c + rt->radius))) { tmp = ask(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } rt = &(root.subtree[pos ^ 2]); if (localAns.first > ::min(std::abs(s_point[1] - rt->y_c - rt->radius), std::abs(s_point[1] - rt->y_c + rt->radius))) { tmp = ask(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } rt = &(root.subtree[pos ^ 1]); if (localAns.first > ::min(std::abs(s_point[0] - rt->x_c - rt->radius), std::abs(s_point[0] - rt->x_c + rt->radius))) { tmp = ask(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } } // for (std::vector<int>::iterator i = root.incl.begin(); i != root.incl.end(); i++) { float *r_point = &r_points[(*i)]; float dis = ::pow((r_point[0] - s_point[0]), 2); dis += ::pow((r_point[1] - s_point[1]), 2); dis += ::pow((r_point[2] - s_point[2]), 2); if (dis < localAns.first) { localAns.first = dis; localAns.second = *i; } } return localAns; } }; extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results // ) { v12::k = k; if (k != 3) { return v0::cudaCall(k, m, n, s_points, r_points, results); } v12::r_points = r_points; v12::s_points = s_points; long sta, end; sta = getTime(); v12::ocTree bt(n); end = getTime(); printf("---search on ocTree: --- "); printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6); *results = (int *)malloc(sizeof(int) * m); int thread = ::min(m, omp_get_max_threads()); #pragma omp parallel for num_threads(thread) for (int i = 0; i < m; i++) (*results)[i] = bt.ask(bt.treeRoot, &s_points[i * k]).second; } } // GPU :ocTree namespace v13 { int k; float *r_points, *s_points; struct Node { thrust::host_vector<int> incl; // float x_c, y_c, z_c; // float radius; // int pos; // 0 1 x2 y3xy4z-1 thrust::host_vector<Node> subtree; // int depth; // Node(float x, float y, float z, float r, int position = -1) { x_c = x; y_c = y; z_c = z; radius = r; pos = position; } Node &operator=(const Node &o) { x_c = o.x_c; y_c = o.y_c; z_c = o.z_c; pos = o.pos; incl = o.incl; subtree = o.subtree; return *this; } Node() { depth = 0; } Node(int dep) { depth = dep; pos = -1; } void setC(float x, float y, float z, float r) { x_c = x; y_c = y; z_c = z; radius = r; } }; /** * @brief GPU * * @param root * @param s_point * @param ans * @param r_points * @param rt * @return thrust::pair<float, int> */ __device__ __host__ thrust::pair<float, int> ask_device( Node 
root = Node(0), float *s_point = nullptr, thrust::pair<float, int> ans = {INFINITY, 0}, float *r_points = nullptr, int rt = 1) { if (root.pos == -1 && root.incl.size() == 0) return ans; // thrust::pair<float, int> localAns = ans; if (root.incl.size() == 0) { // thrust::pair<float, int> tmp(INFINITY, 0); int pos = (s_point[0] > root.x_c) ? 0 : 1; pos |= (s_point[1] > root.y_c) ? 0 : 2; pos |= (s_point[2] > root.z_c) ? 0 : 4; tmp = ask_device(root.subtree[pos], s_point, localAns); // localAns = tmp.first > localAns.first ? localAns : tmp; // Node *rt = &(root.subtree[pos ^ 4]); if (localAns.first > thrust::min(std::abs(s_point[2] - rt->z_c - rt->radius), std::abs(s_point[2] - rt->z_c + rt->radius))) { tmp = ask_device(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } rt = &(root.subtree[pos ^ 2]); if (localAns.first > thrust::min(std::abs(s_point[1] - rt->y_c - rt->radius), std::abs(s_point[1] - rt->y_c + rt->radius))) { tmp = ask_device(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } rt = &(root.subtree[pos ^ 1]); if (localAns.first > thrust::min(std::abs(s_point[0] - rt->x_c - rt->radius), std::abs(s_point[0] - rt->x_c + rt->radius))) { tmp = ask_device(*rt, s_point, localAns); localAns = tmp.first > localAns.first ? localAns : tmp; } } // for (thrust::host_vector<int>::iterator i = root.incl.begin(); i != root.incl.end(); i++) { float *r_point = &r_points[(*i)]; float dis = ::pow((r_point[0] - s_point[0]), 2); dis += ::pow((r_point[1] - s_point[1]), 2); dis += ::pow((r_point[2] - s_point[2]), 2); if (dis < localAns.first) { localAns.first = dis; localAns.second = *i; } } return localAns; } /** * @brief cuda * * @param root * @param s_point * @param r_points * @param m * @param results */ __global__ void range_ask_kernel( Node root = Node(0), float *s_point = nullptr, float *r_points = nullptr, int m = 0, int *results = nullptr) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; if (global_id > m) { return; } // thrust::pair<float, int> ans = {INFINITY, 0}; // results[global_id] = ask_device(root, s_point, ans, r_points); } struct ocTreeGPU { Node treeRoot; ocTreeGPU(int n) { thrust::host_vector<int> se(n); for (int i = 0; i < n; i++) se[i] = i; treeRoot = build(se.begin(), se.end(), 0); treeRoot.pos = 0; } Node build(thrust::host_vector<int>::iterator beg, thrust::host_vector<int>::iterator end, int depth) { if (beg >= end) return Node(depth); float x_min = INFINITY, x_max = -x_min, y_min = INFINITY, y_max = -y_min, z_min = INFINITY, z_max = -z_min; // for (thrust::host_vector<int>::iterator i = beg; i != end; i++) { float *point = &r_points[(*i)]; x_min = ::min(x_min, point[0]); x_max = ::max(x_max, point[0]); y_min = ::min(y_min, point[1]); y_max = ::max(y_max, point[1]); z_min = ::min(z_min, point[2]); z_max = ::max(z_max, point[2]); } float r = thrust::max((x_max - x_min) / 2, thrust::max((y_max - y_min) / 2, (z_max - z_min) / 2)); Node root(depth); root.setC((x_min + x_max) / 2, (y_max + y_min) / 2, (z_max + z_min) / 2, r); root.subtree.resize(8, Node(root.depth + 1)); // 8 for (thrust::host_vector<int>::iterator i = beg; i != end; i++) { float *point = &r_points[(*i)]; int pos = (point[0] > root.x_c) ? 0 : 1; pos |= (point[1] > root.y_c) ? 0 : 2; pos |= (point[2] > root.z_c) ? 
0 : 4; root.subtree[pos].incl.push_back((*i)); } root.incl.clear(); for (int i = 0; i < 8; i++) { if (root.subtree[i].depth > 9 || root.subtree[i].incl.size() <= 1) { root.subtree[i].pos = -1; // } else { root.subtree[i] = build(root.subtree[i].incl.begin(), root.subtree[i].incl.end(), depth + 1); root.subtree[i].incl.clear(); root.subtree[i].pos = i; } } return root; } void range_ask(int m, int *results) { thrust::device_vector<int> result(m); int minGridSize, blockSize; CHECK(hipOccupancyMaxPotentialBlockSize( &minGridSize, &blockSize, range_ask_kernel)); hipLaunchKernelGGL(( range_ask_kernel), dim3(divup(m, blockSize)), dim3(blockSize), 0, 0, treeRoot, s_points, r_points, m, thrust::raw_pointer_cast(result.data())); thrust::copy(result.begin(), result.end(), results); } }; extern void cudaCall( int k, // int m, // int n, // float *s_points, // float *r_points, // int **results // ) { v13::k = k; if (k != 3) { return v0::cudaCall(k, m, n, s_points, r_points, results); } v13::r_points = r_points; v13::s_points = s_points; long sta, end; sta = getTime(); v13::ocTreeGPU bt(n); end = getTime(); printf("---search on ocTree: --- "); printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6); *results = (int *)malloc(sizeof(int) * m); bt.range_ask(m, *results); } } struct WarmUP { /** * @brief GPU * * @param k * @param m * @param n */ WarmUP(int k, int m, int n) { float *s_points = (float *)malloc(sizeof(float) * k * m); float *r_points = (float *)malloc(sizeof(float) * k * n); #pragma omp parallel { unsigned int seed = omp_get_thread_num(); // #pragma omp for for (int i = 0; i < k * m; ++i) s_points[i] = rand_r(&seed) / double(RAND_MAX); // #pragma omp for for (int i = 0; i < k * n; ++i) r_points[i] = rand_r(&seed) / double(RAND_MAX); } for (int i = 0; i < 10; ++i) { int *result; v9::cudaCall(k, m, n, s_points, r_points, &result); free(result); } free(s_points); free(r_points); } }; static WarmUP warm_up(1, 1, 1 << 15);
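// ---------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the original source): shows how the
// cudaCall variants defined above could be driven from a host program. The
// entry point, the problem sizes, and the random test data below are
// illustrative assumptions only; it relies on v9::cudaCall exactly as declared
// in this file and on the C standard library for malloc()/rand(). Kept inside
// #if 0 so the behaviour of the file is unchanged.
// ---------------------------------------------------------------------------
#if 0
#include <stdlib.h>
int main() {
    const int k = 3;        // point dimensionality
    const int m = 1 << 10;  // number of query points
    const int n = 1 << 16;  // number of reference points
    float *s_points = (float *)malloc(sizeof(float) * k * m);
    float *r_points = (float *)malloc(sizeof(float) * k * n);
    for (int i = 0; i < k * m; ++i) s_points[i] = rand() / (float)RAND_MAX;
    for (int i = 0; i < k * n; ++i) r_points[i] = rand() / (float)RAND_MAX;
    int *results = NULL;
    // multi-GPU, unrolled-reduction variant defined earlier in this file
    v9::cudaCall(k, m, n, s_points, r_points, &results);
    // results[i] now holds the index of the nearest reference point for query i
    free(results);
    free(s_points);
    free(r_points);
    return 0;
}
#endif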
a1263940af2904cc2abcff37d2425ea59a38c946.cu
#include <stdio.h> #include <math.h> #include <algorithm> #include <omp.h> #include "utils.h" #include <cuda_runtime.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> // CPU 串行版本 namespace v0 { /** * @brief CPU 串行版本 * * @param k 空间维度 * @param m 查询点数量 * @param n 参考点数量 * @param s_points 查询点集 * @param r_points 参考点集 * @param results 存放结果 */ extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { int *tmp = (int *)malloc(sizeof(int) * m); for (int i = 0; i < m; ++i) { float minSum = INFINITY; int index = 0; for (int j = 0; j < n; ++j) { float tempSum = 0; for (int t = 0; t < k; ++t) { const float diff = s_points[i * k + t] - r_points[j * k + t]; tempSum += diff * diff; // 计算距离 } if (minSum > tempSum) { // 找出最小点 minSum = tempSum; index = j; } } tmp[i] = index; } *results = tmp; } } // GPU: 先计算 m*n 的距离矩阵,再求最近邻点 namespace v1 { extern __global__ void get_dis_kernel( const int k, // 空间维度 const int m, // 查询点数量 const int n, // 参考点数量 const float *__restrict__ s_points, // 查询点集 const float *__restrict__ r_points, // 参考点集 float *__restrict__ dis) // 最近邻点集 { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idm = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idm < m) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } dis[idn + idm * n] = tempSum; // 计算 m*n 的距离矩阵 } } /** * @brief 共享内存树形归约 * * @param m 查询点数量 * @param n 参考点数量 * @param dis 距离向量 * @param result 结果 */ template <int BLOCK_DIM> static __global__ void get_min_kernel( // 共享内存树形归约 const int m, const int n, const float *__restrict__ dis, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { const float tempSum = dis[idn + blockIdx.y * n]; if (dis_s[threadIdx.x] > tempSum) { // 赋值到共享内存 dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { // 树形归约 if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { float *dis_d, *s_d, *r_d; int *results_d; CHECK(cudaMalloc((void **)&dis_d, m * n * sizeof(float))); CHECK(cudaMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(cudaMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&results_d, m * sizeof(int))); CHECK(cudaMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; // 设置blockSize get_dis_kernel<<< dim3(divup(n, BLOCK_DIM_X), divup(m, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>(k, m, n, s_d, r_d, dis_d); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; // 设置blockSize get_min_kernel<BLOCK_DIM><<<m, BLOCK_DIM>>>(m, n, dis_d, results_d); // 计算最近邻点 CHECK(cudaMemcpy((void **)*results, (void *)results_d, m * 
sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(dis_d)); CHECK(cudaFree(s_d)); CHECK(cudaFree(r_d)); CHECK(cudaFree(results_d)); } }; // GPU: 使用thrust库 namespace v2 { extern __global__ void get_dis_kernel( const int k, // 空间维度 const int m, // 查询点数量 const int n, // 参考点数量 const float *__restrict__ s_points, // 查询点集 const float *__restrict__ r_points, // 参考点集 float *__restrict__ dis) // 最近邻点集 { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idm = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idm < m) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } dis[idn + idm * n] = tempSum; // 计算 m*n 的距离矩阵 } } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { thrust::device_vector<float> dis_d(m * n); thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(r_points, r_points + k * n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; // 设置blockSize get_dis_kernel<<< dim3(divup(n, BLOCK_DIM_X), divup(m, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>( k, m, n, thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(dis_d.data())); *results = (int *)malloc(sizeof(int) * m); for (int i = 0; i < m; ++i) // 找出最近邻点 (*results)[i] = thrust::min_element(dis_d.begin() + n * i, dis_d.begin() + n * i + n) - dis_d.begin() - n * i; } }; // GPU:计算距离并同时归约 namespace v3 { /** * @brief cuda 计算距离同时归约核函数 * * @tparam BLOCK_DIM * @param k * @param m * @param n * @param s_points * @param r_points * @param result */ template <int BLOCK_DIM> static __global__ void cudaCallKernel( const int k, const int m, const int n, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { // 计算距离 const float diff = s_points[idk + idm * k] - r_points[idk + idn * k]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { // 树形归约 if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { float *s_d, *r_d; int *results_d; CHECK(cudaMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(cudaMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&results_d, m * sizeof(int))); CHECK(cudaMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), cudaMemcpyHostToDevice)); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; // 设置blockSize cudaCallKernel<BLOCK_DIM><<<m, BLOCK_DIM>>>(k, m, n, s_d, r_d, results_d); // 计算最近邻点 CHECK(cudaMemcpy((void **)*results, (void 
*)results_d, m * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(s_d)); CHECK(cudaFree(r_d)); CHECK(cudaFree(results_d)); } }; // GPU:AoS2SoA namespace v4 { /** * @brief 转置矩阵 的 核函数 * * @param k 矩阵长 * @param n 矩阵宽 * @param input 输入矩阵的指针 * @param output 输出矩阵的指针 */ static __global__ void mat_inv_kernel( const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { float *s_d, *r_d, *rr_d; int *results_d; CHECK(cudaMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(cudaMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&rr_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&results_d, m * sizeof(int))); CHECK(cudaMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), cudaMemcpyHostToDevice)); CHECK(cudaMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; mat_inv_kernel<<< dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>(k, n, r_d, rr_d); const int BLOCK_DIM = 1024; cudaCallKernel<BLOCK_DIM><<<m, BLOCK_DIM>>>(k, m, n, s_d, rr_d, results_d); // 计算最近邻点 CHECK(cudaMemcpy((void **)*results, (void *)results_d, m * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(s_d)); CHECK(cudaFree(r_d)); CHECK(cudaFree(rr_d)); CHECK(cudaFree(results_d)); } }; // GPU:使用纹理内存存储参考点集 namespace v5 { template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const float *__restrict__ s_points, cudaTextureObject_t texObj, //使用纹理对象 int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - tex2D<float>(texObj, idk, idn); tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } 
__syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { if (n > 65536) { // 纹理内存最大限制 v4::cudaCall(k, m, n, s_points, r_points, results); return; } cudaArray *cuArray; cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); CHECK(cudaMallocArray(&cuArray, &channelDesc, k, n)); CHECK(cudaMemcpy2DToArray(cuArray, 0, 0, r_points, sizeof(float) * k, sizeof(float) * k, n, cudaMemcpyHostToDevice)); // 绑定纹理到cudaArray上 struct cudaResourceDesc resDesc; memset(&resDesc, 0, sizeof(resDesc)); resDesc.resType = cudaResourceTypeArray; resDesc.res.array.array = cuArray; // 设置纹理为只读 struct cudaTextureDesc texDesc; memset(&texDesc, 0, sizeof(texDesc)); texDesc.readMode = cudaReadModeElementType; // 创建纹理对象 cudaTextureObject_t texObj = 0; CHECK(cudaCreateTextureObject(&texObj, &resDesc, &texDesc, NULL)); float *s_d; int *results_d; CHECK(cudaMalloc((void **)&s_d, k * m * sizeof(float))); CHECK(cudaMalloc((void **)&results_d, m * sizeof(int))); CHECK(cudaMemcpy((void *)s_d, (void *)s_points, k * m * sizeof(float), cudaMemcpyHostToDevice)); *results = (int *)malloc(sizeof(int) * m); const int BLOCK_DIM = 1024; cudaCallKernel<BLOCK_DIM><<<m, BLOCK_DIM>>>(k, m, n, s_d, texObj, results_d); // 计算最近邻点 // 销毁纹理对象 CHECK(cudaDestroyTextureObject(texObj)); CHECK(cudaMemcpy((void **)*results, (void *)results_d, m * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(s_d)); CHECK(cudaFree(results_d)); } }; // GPU:使用常量内存存储转置参考点集 namespace v6 { static __constant__ float const_mem[(64 << 10) / sizeof(float)]; // 常量内存最大限制 static __global__ void mat_inv_kernel( // 矩阵转置 const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= m) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = const_mem[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { if (k * m > (64 << 10) / sizeof(float)) { v4::cudaCall(k, m, n, s_points, r_points, 
results); return; } CHECK(cudaMemcpyToSymbol(const_mem, s_points, sizeof(float) * k * m)); // 拷贝搜索点集到常量内存 float *r_d, *rr_d; int *results_d; CHECK(cudaMalloc((void **)&r_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&rr_d, k * n * sizeof(float))); CHECK(cudaMalloc((void **)&results_d, m * sizeof(int))); CHECK(cudaMemcpy((void *)r_d, (void *)r_points, k * n * sizeof(float), cudaMemcpyHostToDevice)); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; mat_inv_kernel<<< dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>(k, n, r_d, rr_d); const int BLOCK_DIM = 1024; cudaCallKernel<BLOCK_DIM><<<m, BLOCK_DIM>>>(k, m, n, rr_d, results_d); // 计算最近邻点 CHECK(cudaMemcpy((void **)*results, (void *)results_d, m * sizeof(int), cudaMemcpyDeviceToHost)); CHECK(cudaFree(r_d)); CHECK(cudaFree(rr_d)); CHECK(cudaFree(results_d)); } }; // GPU:多个block归约 namespace v7 { static __global__ void mat_inv_kernel( // 矩阵转置 const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(r_points, r_points + k * n); thrust::device_vector<float> rr_d(k * n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; mat_inv_kernel<<< dim3(divup(n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>( k, n, thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(rr_d.data())); const int BLOCK_DIM = 1024; int numBlocks; CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( // 获取启动的block数量 &numBlocks, cudaCallKernel<BLOCK_DIM>, BLOCK_DIM, 0)); thrust::device_vector<int> results_d(m * divup(numBlocks, m)); cudaCallKernel<BLOCK_DIM><<<dim3(results_d.size() / m, m), BLOCK_DIM>>>( k, m, n, results_d.size(), thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(rr_d.data()), thrust::raw_pointer_cast(results_d.data())); *results = (int *)malloc(sizeof(int) * m); if (results_d.size() == m) { thrust::copy(results_d.begin(), results_d.end(), *results); return; } thrust::host_vector<int> 
results_tmp(results_d); for (int idm = 0; idm < m; ++idm) { // CPU端归约查找最近邻点 float minSum = INFINITY; int index = 0; for (int i = 0; i < results_tmp.size(); i += m) { const int idn = results_tmp[i]; float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[k * idm + idk] - r_points[k * idn + idk]; tempSum += diff * diff; } if (minSum > tempSum) { minSum = tempSum; index = idn; } } (*results)[idm] = index; } } }; // GPU: 多卡归约 namespace v8 { static __global__ void mat_inv_kernel( // 矩阵转置 const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); for (int offset = BLOCK_DIM >> 1; offset > 0; offset >>= 1) { if (threadIdx.x < offset) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ offset]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ offset]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ offset]; } __syncthreads(); } if (threadIdx.x == 0) result[id] = ind_s[0]; } extern void cudaCall( int k, // 空间维度 int m, // 查询点数量 int n, // 参考点数量 float *s_points, // 查询点集 float *r_points, // 参考点集 int **results) // 最近邻点集 { thrust::host_vector<int> results_tmp; int num_gpus = 0; CHECK(cudaGetDeviceCount(&num_gpus)); // 获得显卡数 if (num_gpus > n) num_gpus = n; if (num_gpus < 1) return v0::cudaCall(k, m, n, s_points, r_points, results); if (n <= thrust::min(1 << 18, m << 10)) return v7::cudaCall(k, m, n, s_points, r_points, results); #pragma omp parallel num_threads(num_gpus) // 多卡并行 { int thread_num = omp_get_thread_num(); int thread_n = divup(n, num_gpus); float *thread_r_points = r_points + thread_num * thread_n * k; // 为每张显卡分配定量的参考点集 if (thread_num == num_gpus - 1) { thread_n = n - thread_num * thread_n; if (thread_n == 0) { thread_n = 1; thread_r_points -= k; } } CHECK(cudaSetDevice(thread_num)); // 选择对应的显卡 thrust::device_vector<float> s_d(s_points, s_points + k * m); thrust::device_vector<float> r_d(thread_r_points, thread_r_points + k * thread_n); thrust::device_vector<float> rr_d(k * thread_n); const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32; mat_inv_kernel<<< dim3(divup(thread_n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y)), dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>( k, thread_n, thrust::raw_pointer_cast(r_d.data()), thrust::raw_pointer_cast(rr_d.data())); const int BLOCK_DIM = 1024; int numBlocks; CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor( &numBlocks, cudaCallKernel<BLOCK_DIM>, BLOCK_DIM, 0)); thrust::device_vector<int> results_d(m * divup(numBlocks, m)); cudaCallKernel<BLOCK_DIM><<<dim3(results_d.size() / m, m), BLOCK_DIM>>>( k, m, thread_n, results_d.size(), 
thrust::raw_pointer_cast(s_d.data()), thrust::raw_pointer_cast(rr_d.data()), thrust::raw_pointer_cast(results_d.data())); int my_beg, my_end; #pragma omp critical // 临界区将多卡结果合并 { my_beg = results_tmp.size(); results_tmp.insert(results_tmp.end(), results_d.begin(), results_d.end()); my_end = results_tmp.size(); } #pragma omp barrier // 多卡同步 for (int offset = (thread_r_points - r_points) / k; my_beg < my_end; ++my_beg) results_tmp[my_beg] += offset; // 将每张卡上的参考点index转为全局index } *results = (int *)malloc(sizeof(int) * m); for (int idm = 0; idm < m; ++idm) { // CPU端归约查找最近邻点 float minSum = INFINITY; int index = 0; for (int i = 0; i < results_tmp.size(); i += m) { const int idn = results_tmp[i]; float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[k * idm + idk] - r_points[k * idn + idk]; tempSum += diff * diff; } if (minSum > tempSum) { minSum = tempSum; index = idn; } } (*results)[idm] = index; } } }; // GPU:多卡规约+循环展开 namespace v9 { static __global__ void mat_inv_kernel( // 矩阵转置 const int k, const int n, const float *__restrict__ input, float *__restrict__ output) { const int idn = threadIdx.x + blockIdx.x * blockDim.x; const int idk = threadIdx.y + blockIdx.y * blockDim.y; if (idn < n && idk < k) { const float a = input[idn * k + idk]; output[idn + idk * n] = a; } } template <int BLOCK_DIM> static __global__ void cudaCallKernel( // 计算距离并归约 const int k, const int m, const int n, const int result_size, const float *__restrict__ s_points, const float *__restrict__ r_points, int *__restrict__ result) { const int id = blockIdx.x * gridDim.y + blockIdx.y; if (id >= result_size) return; __shared__ float dis_s[BLOCK_DIM]; __shared__ int ind_s[BLOCK_DIM]; dis_s[threadIdx.x] = INFINITY; ind_s[threadIdx.x] = 0; for (int idm = blockIdx.y, idn = threadIdx.x + blockIdx.x * BLOCK_DIM; idn < n; idn += gridDim.x * BLOCK_DIM) { float tempSum = 0; for (int idk = 0; idk < k; ++idk) { const float diff = s_points[idk + idm * k] - r_points[idk * n + idn]; tempSum += diff * diff; } if (dis_s[threadIdx.x] > tempSum) { dis_s[threadIdx.x] = tempSum; ind_s[threadIdx.x] = idn; } } __syncthreads(); if (threadIdx.x < 512) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 512]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 512]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 512]; } __syncthreads(); if (threadIdx.x < 256) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 256]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 256]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 256]; } __syncthreads(); if (threadIdx.x < 128) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 128]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 128]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 128]; } __syncthreads(); if (threadIdx.x < 64) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 64]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 64]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 64]; } __syncthreads(); if (threadIdx.x < 32) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 32]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 32]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 32]; } if (threadIdx.x < 16) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 16]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 16]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 16]; } if (threadIdx.x < 8) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 8]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 8]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 8]; } if (threadIdx.x < 4) if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 4]) { dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 4]; ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 4]; } if 
(threadIdx.x < 2)
        if (dis_s[threadIdx.x] > dis_s[threadIdx.x ^ 2])
        {
            dis_s[threadIdx.x] = dis_s[threadIdx.x ^ 2];
            ind_s[threadIdx.x] = ind_s[threadIdx.x ^ 2];
        }
    if (threadIdx.x == 0)
        result[id] = dis_s[0] > dis_s[1] ? ind_s[1] : ind_s[0];
}
extern void cudaCall(
    int k,           // spatial dimension
    int m,           // number of query points
    int n,           // number of reference points
    float *s_points, // query point set
    float *r_points, // reference point set
    int **results)   // nearest-neighbor result set
{
    thrust::host_vector<int> results_tmp;
    int num_gpus = 0;
    CHECK(cudaGetDeviceCount(&num_gpus)); // get the number of GPUs
    if (num_gpus > n)
        num_gpus = n;
    if (num_gpus < 1)
        return v0::cudaCall(k, m, n, s_points, r_points, results);
    if (n <= thrust::min(1 << 18, m << 10))
        return v7::cudaCall(k, m, n, s_points, r_points, results);
#pragma omp parallel num_threads(num_gpus) // one host thread per GPU
    {
        int thread_num = omp_get_thread_num();
        int thread_n = divup(n, num_gpus);
        float *thread_r_points = r_points + thread_num * thread_n * k; // assign each GPU its share of the reference points
        if (thread_num == num_gpus - 1)
        {
            thread_n = n - thread_num * thread_n;
            if (thread_n == 0)
            {
                thread_n = 1;
                thread_r_points -= k;
            }
        }
        CHECK(cudaSetDevice(thread_num)); // select the corresponding GPU
        thrust::device_vector<float> s_d(s_points, s_points + k * m);
        thrust::device_vector<float> r_d(thread_r_points, thread_r_points + k * thread_n);
        thrust::device_vector<float> rr_d(k * thread_n);
        const int BLOCK_DIM_X = 32, BLOCK_DIM_Y = 32;
        mat_inv_kernel<<<
            dim3(divup(thread_n, BLOCK_DIM_X), divup(k, BLOCK_DIM_Y)),
            dim3(BLOCK_DIM_X, BLOCK_DIM_Y)>>>(
            k, thread_n,
            thrust::raw_pointer_cast(r_d.data()),
            thrust::raw_pointer_cast(rr_d.data()));
        const int BLOCK_DIM = 1024;
        int numBlocks;
        CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
            &numBlocks,
            cudaCallKernel<BLOCK_DIM>,
            BLOCK_DIM,
            0));
        thrust::device_vector<int> results_d(m * divup(numBlocks, m));
        cudaCallKernel<BLOCK_DIM><<<dim3(results_d.size() / m, m), BLOCK_DIM>>>(
            k, m, thread_n, results_d.size(),
            thrust::raw_pointer_cast(s_d.data()),
            thrust::raw_pointer_cast(rr_d.data()),
            thrust::raw_pointer_cast(results_d.data()));
        int my_beg, my_end;
#pragma omp critical // critical section: merge the per-GPU results
        {
            my_beg = results_tmp.size();
            results_tmp.insert(results_tmp.end(), results_d.begin(), results_d.end());
            my_end = results_tmp.size();
        }
#pragma omp barrier // synchronize all GPUs
        for (int offset = (thread_r_points - r_points) / k; my_beg < my_end; ++my_beg)
            results_tmp[my_beg] += offset; // convert per-GPU reference-point indices into global indices
    }
    *results = (int *)malloc(sizeof(int) * m);
    for (int idm = 0; idm < m; ++idm)
    { // final reduction on the CPU to find the nearest neighbor
        float minSum = INFINITY;
        int index = 0;
        for (int i = 0; i < results_tmp.size(); i += m)
        {
            const int idn = results_tmp[i];
            float tempSum = 0;
            for (int idk = 0; idk < k; ++idk)
            {
                const float diff = s_points[k * idm + idk] - r_points[k * idn + idk];
                tempSum += diff * diff;
            }
            if (minSum > tempSum)
            {
                minSum = tempSum;
                index = idn;
            }
        }
        (*results)[idm] = index;
    }
}
};
// CPU: KDTree
namespace v10
{
    float *s_points, *r_points;
    int k;
    struct DimCmp
    {
        int dim;
        bool operator()(int lhs, int rhs) const
        {
            return r_points[lhs * k + dim] < r_points[rhs * k + dim];
        }
    };
    struct KDTreeCPU
    {
        thrust::host_vector<int> p, dim;
        /**
         * @brief Construct a new KDTreeCPU object
         *
         * @param n number of reference points
         */
        KDTreeCPU(int n) : p(n << 2, -1), dim(p)
        {
            thrust::host_vector<int> se(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n));
            build(se.begin(), se.end());
        }
        /**
         * @brief Recursively build a kd-tree subtree
         *
         * @param beg start of the point set covered by this subtree
         * @param end end of the point set covered by this subtree
         * @param rt index of the subtree root (heap layout)
         */
        void build(thrust::host_vector<int>::iterator beg,
                   thrust::host_vector<int>::iterator end,
                   int rt = 1)
        {
            if (beg >= end)
                return;
            float sa_max = -INFINITY;
            for (int idk = 0; idk < k; ++idk)
            {
                float sum = 0, sa = 0;
                for (thrust::host_vector<int>::iterator it = beg; it != end; ++it)
                {
                    float val = r_points[(*it) * k + idk];
                    sum += val, sa += val * val;
                }
                sa = (sa - sum * sum / (end - beg)) / (end - beg);
                if (sa_max < sa)
                    sa_max = sa, dim[rt] = idk;
            }
            thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2;
            std::nth_element(beg, mid, end, DimCmp{dim[rt]});
            p[rt] = *mid;
            build(beg, mid, rt << 1);
            build(++mid, end, rt << 1 | 1);
        }
        /**
         * @brief Query function
         *
         * @param x index of the query point
         * @param ans current best answer
         * @param rt index of the subtree root
         * @return thrust::pair<float, int>
         */
        thrust::pair<float, int> ask(int x, thrust::pair<float, int> ans = {INFINITY, 0}, int rt = 1)
        {
            if (dim[rt] < 0)
                return ans;
            float d = s_points[x * k + dim[rt]] - r_points[p[rt] * k + dim[rt]], tmp = 0;
            for (int idk = 0; idk < k; ++idk)
            {
                float diff = s_points[x * k + idk] - r_points[p[rt] * k + idk];
                tmp += diff * diff;
            }
            int w = d > 0;
            ans = ask(x, min(ans, {tmp, p[rt]}), (rt << 1) ^ w);
            if (ans.first > d * d - 1e-6)
                ans = ask(x, ans, (rt << 1) ^ w ^ 1);
            return ans;
        }
    };
    extern void cudaCall(
        int k,           // spatial dimension
        int m,           // number of query points
        int n,           // number of reference points
        float *s_points, // query point set
        float *r_points, // reference point set
        int **results)   // nearest-neighbor result set
    {
        if (k > 16)
            return v0::cudaCall(k, m, n, s_points, r_points, results);
        v10::k = k;
        v10::s_points = s_points;
        v10::r_points = r_points;
        long sta, end;
        sta = getTime();
        KDTreeCPU kd(n);
        end = getTime();
        *results = (int *)malloc(sizeof(int) * m);
        printf("---search on KD-Tree: --- ");
        printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6);
        for (int i = 0; i < m; ++i)
            (*results)[i] = kd.ask(i).second;
    }
}
// GPU: KDTree
// namespace v10
// {
//     __device__ thrust::pair<float, int> ask_device(
//         float *s_d,
//         float *r_d,
//         int *dim,
//         int *p,
//         int k,
//         int x,
//         thrust::pair<float, int> ans = {INFINITY, 0},
//         int rt = 1)
//     {
//         int dimrt = dim[rt];
//         if (dimrt < 0)
//             return ans;
//         int prt = p[rt];
//         if (prt < 0)
//             return ans;
//         float d = s_d[x * k + dimrt] - r_d[prt * k + dimrt], tmp = 0;
//         for (int idk = 0; idk < k; ++idk) {
//             float diff = s_d[x * k + idk] - r_d[prt * k + idk];
//             tmp += diff * diff;
//         }
//         int w = d > 0;
//         ans = ask_device(s_d, r_d, dim, p, k, x, thrust::min(ans, {tmp, prt}), (rt << 1) ^ w);
//         if (ans.first > d * d - 1e-6)
//             ans = ask_device(s_d, r_d, dim, p, k, x, ans, (rt << 1) ^ w ^ 1);
//         return ans;
//     }
//     __global__ void range_ask_kernel(
//         float *s_d,
//         float *r_d,
//         int *dim,
//         int *p,
//         int k,
//         int m,
//         int *results)
//     {
//         int global_id = blockIdx.x * blockDim.x + threadIdx.x;
//         if (global_id >= m)
//             return;
//         results[global_id] = ask_device(s_d, r_d, dim, p, k, global_id).second;
//     }
//     float *s_points, *r_points;
//     int k;
//     struct DimCmp {
//         int dim;
//         bool operator()(int lhs, int rhs) const {
//             return r_points[lhs * k + dim] < r_points[rhs * k + dim];
//         }
//     };
//     struct KDTreeGPU {
//         thrust::host_vector<int> p, dim;
//         thrust::device_vector<int> p_d, dim_d;
//         thrust::device_vector<float> s_d, r_d;
//         KDTreeGPU(int n, int m) :
//             p(n << 2, -1), dim(p),
//             s_d(s_points, s_points + k * m),
//             r_d(r_points, r_points + k * n)
//         {
//             thrust::host_vector<int> se(thrust::counting_iterator<int>(0), thrust::counting_iterator<int>(n));
//             build(se.begin(), se.end());
//             dim_d = dim, p_d = p;
//         }
//         void build(
//             thrust::host_vector<int>::iterator beg,
//             thrust::host_vector<int>::iterator end,
//             int rt = 1)
//         {
//             if (beg >= end)
//                 return;
//             float sa_max = -INFINITY;
//             for (int idk = 0; idk < k; ++idk) {
//                 float sum = 0, sa = 0;
//                 for (thrust::host_vector<int>::iterator it = beg; it != end; ++it) {
//                     float val = r_points[(*it) * k + idk];
//                     sum += val, sa += val * val;
//                 }
//                 sa = (sa - sum * sum / (end - beg)) / (end - beg);
//                 if (sa_max < sa)
//                     sa_max = sa, dim[rt] = idk;
//             }
//             thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2;
//             std::nth_element(beg, mid, end, DimCmp{dim[rt]});
//             p[rt] = *mid;
//             build(beg, mid, rt << 1);
//             build(++mid, end, rt << 1 | 1);
//         }
//         void range_ask(int m, int *results) {
//             thrust::device_vector<int> results_d(m);
//             int minGridSize, blockSize;
//             CHECK(cudaOccupancyMaxPotentialBlockSize(
//                 &minGridSize,
//                 &blockSize,
//                 range_ask_kernel));
//             range_ask_kernel<<<divup(m, blockSize), blockSize>>> (
//                 thrust::raw_pointer_cast(s_d.data()),
//                 thrust::raw_pointer_cast(r_d.data()),
//                 thrust::raw_pointer_cast(dim_d.data()),
//                 thrust::raw_pointer_cast(p_d.data()),
//                 k, m,
//                 thrust::raw_pointer_cast(results_d.data()));
//             thrust::copy(results_d.begin(), results_d.end(), results);
//         }
//     };
//     extern void cudaCall(
//         int k,           // spatial dimension
//         int m,           // number of query points
//         int n,           // number of reference points
//         float *s_points, // query point set
//         float *r_points, // reference point set
//         int **results)   // nearest-neighbor result set
//     {
//         if (k > 16)
//             return v0::cudaCall(k, m, n, s_points, r_points, results);
//         v10::k = k;
//         v10::s_points = s_points;
//         v10::r_points = r_points;
//         KDTreeGPU kd(n, m);
//         *results = (int *)malloc(sizeof(int) * m);
//         kd.range_ask(m, *results);
//     }
// }
// GPU KD-Tree
namespace v11
{
    /**
     * @brief Query function on the GPU
     *
     * @param s_d query point set
     * @param r_d reference point set
     * @param dim split dimension of each tree node
     * @param p reference-point index stored at each tree node
     * @param k spatial dimension
     * @param x index of the query point
     * @param ans current best answer
     * @param rt current subtree position
     * @return thrust::pair<float, int>
     */
    __device__ thrust::pair<float, int> ask_device(
        float *s_d,
        float *r_d,
        int *dim,
        int *p,
        int k,
        int x,
        thrust::pair<float, int> ans = {INFINITY, 0},
        int rt = 1)
    {
        int dimrt = dim[rt];
        if (dimrt < 0)
            return ans;
        int prt = p[rt];
        if (prt < 0)
            return ans;
        float d = s_d[x * k + dimrt] - r_d[prt * k + dimrt], tmp = 0;
        for (int kInd = 0; kInd < k; ++kInd)
        {
            float diff = s_d[x * k + kInd] - r_d[prt * k + kInd];
            tmp += diff * diff;
        }
        int w = d > 0;
        ans = ask_device(s_d, r_d, dim, p, k, x, thrust::min(ans, {tmp, prt}), (rt << 1) ^ w);
        if (ans.first > d * d - 1e-6)
            ans = ask_device(s_d, r_d, dim, p, k, x, ans, (rt << 1) ^ w ^ 1);
        return ans;
    }
    __global__ void range_ask_kernel(
        float *s_d,
        float *r_d,
        int *dim,
        int *p,
        int k,
        int m,
        int *results)
    {
        int global_id = blockIdx.x * blockDim.x + threadIdx.x;
        if (global_id >= m)
            return;
        // results[global_id] = ask_device(s_d, r_d, dim, p, k, global_id).second;
    }
    float *s_points, *r_points;
    int k;
    struct DimCmp
    {
        int dim;
        bool operator()(int lhs, int rhs) const
        {
            return r_points[lhs * k + dim] < r_points[rhs * k + dim];
        }
    };
    struct KDTreeGPU
    {
        thrust::host_vector<int> p, dim;
        thrust::device_vector<int> p_d, dim_d;
        thrust::device_vector<float> s_d, r_d;
        KDTreeGPU(int n, int m) : p(n << 2, -1), dim(p),
                                  s_d(s_points, s_points + k * m),
                                  r_d(r_points, r_points + k * n)
        {
            thrust::host_vector<int> se(
                thrust::counting_iterator<int>(0),
                thrust::counting_iterator<int>(n));
            build(se.begin(), se.end());
            dim_d = dim, p_d = p;
        }
        void build(
            thrust::host_vector<int>::iterator beg,
            thrust::host_vector<int>::iterator end,
            int rt = 1)
        {
            if (beg >= end)
                return;
            float sa_max = -INFINITY;
            for (int kInd = 0; kInd < k; ++kInd)
            {
                float sum = 0, sa = 0;
                for (thrust::host_vector<int>::iterator it = beg; it != end; ++it)
                {
                    float val = r_points[(*it) * k + kInd];
                    sum += val, sa += val * val;
                }
                sa = (sa - sum * sum / (end - beg)) / (end - beg);
                if (sa_max < sa)
                    sa_max = sa, dim[rt] = kInd;
            }
            thrust::host_vector<int>::iterator mid = beg + (end - beg) / 2;
            std::nth_element(beg, mid, end, DimCmp{dim[rt]});
            p[rt] = *mid;
            build(beg, mid, rt << 1);
            build(++mid, end, rt << 1 | 1);
        }
        /**
         * @brief Find the nearest neighbor for each of the m query points
         *
         * @param m number of query points
         * @param results output buffer for the results
         */
        void range_ask(int m, int *results)
        {
            thrust::device_vector<int> results_d(m);
            int minGridSize, blockSize;
            CHECK(cudaOccupancyMaxPotentialBlockSize(
                &minGridSize,
                &blockSize,
                range_ask_kernel));
            range_ask_kernel<<<
                divup(m, blockSize),
                blockSize>>>(
                thrust::raw_pointer_cast(s_d.data()),
                thrust::raw_pointer_cast(r_d.data()),
                thrust::raw_pointer_cast(dim_d.data()),
                thrust::raw_pointer_cast(p_d.data()),
                k, m,
                thrust::raw_pointer_cast(results_d.data()));
            thrust::copy(results_d.begin(), results_d.end(), results);
        }
    };
    static void cudaCall(
        int k,
        int m,
        int n,
        float *s_points,
        float *r_points,
        int **results)
    {
        if (k > 16)
            return v0::cudaCall(k, m, n, s_points, r_points, results);
        v11::k = k;
        v11::s_points = s_points;
        v11::r_points = r_points;
        long sta, end;
        sta = getTime();
        KDTreeGPU kd(n, m);
        end = getTime();
        *results = (int *)malloc(sizeof(int) * m);
        printf("---search on KD-Tree: --- ");
        printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6);
        // wwz timer;
        kd.range_ask(m, *results);
    }
} // namespace v11
// CPU: ocTree
namespace v12
{
    int k;
    float *r_points, *s_points;
    struct Node
    {
        std::vector<int> incl;     // indices of the points contained in this node
        float x_c, y_c, z_c;       // center of the octree cell
        float radius;              // half edge length of the cell
        int pos;                   // child label: 0 means all coordinates above the center; bit 1 set means x below, bit 2 means y below, bit 4 means z below (so 3 means x and y below, ...); -1 means unused/leaf
        std::vector<Node> subtree; // children
        int depth;                 // node depth
        Node(float x, float y, float z, float r, int position = -1)
        {
            x_c = x;
            y_c = y;
            z_c = z;
            radius = r;
            pos = position;
        }
        Node &operator=(const Node &o)
        {
            x_c = o.x_c;
            y_c = o.y_c;
            z_c = o.z_c;
            pos = o.pos;
            incl = o.incl;
            subtree = o.subtree;
            return *this;
        }
        Node() { depth = 0; }
        Node(int dep)
        {
            depth = dep;
            pos = -1;
        }
        /**
         * @brief Set the center position of this node
         *
         * @param x position along the x dimension
         * @param y position along the y dimension
         * @param z position along the z dimension
         * @param r half edge length of the cell
         */
        void setC(float x, float y, float z, float r)
        {
            x_c = x;
            y_c = y;
            z_c = z;
            radius = r;
        }
    };
    struct ocTree
    {
        Node treeRoot;
        ocTree(int n)
        {
            std::vector<int> se(n);
            for (int i = 0; i < n; i++)
                se[i] = i;
            treeRoot = build(se.begin(), se.end(), 0);
            treeRoot.pos = 0;
        }
        /**
         * @brief Build the tree
         *
         * @param beg start of the data covered by this subtree
         * @param end end of the data covered by this subtree
         * @param depth depth of the current subtree
         * @return Node root node of the built subtree
         */
        Node build(std::vector<int>::iterator beg, std::vector<int>::iterator end, int depth)
        {
            if (beg >= end)
                return Node(depth);
            float x_min = INFINITY, x_max = -x_min,
                  y_min = INFINITY, y_max = -y_min,
                  z_min = INFINITY, z_max = -z_min;
            // find the minimum and maximum of the current point set along the three dimensions
            for (std::vector<int>::iterator i = beg; i != end; i++)
            {
                float *point = &r_points[(*i) * 3]; // stride 3: points are stored as a flattened [n][3] array
                x_min = std::min(x_min, point[0]);
                x_max = std::max(x_max, point[0]);
                y_min = std::min(y_min, point[1]);
                y_max = std::max(y_max, point[1]);
                z_min = std::min(z_min, point[2]);
                z_max = std::max(z_max, point[2]);
            }
            float r = std::max((x_max - x_min) / 2, std::max((y_max - y_min) / 2, (z_max - z_min) / 2));
            Node root(depth);
            root.setC((x_min + x_max) / 2, (y_max + y_min) / 2, (z_max + z_min) / 2, r);
            root.subtree.resize(8, Node(root.depth + 1)); // create the 8 children
            for (std::vector<int>::iterator i = beg; i != end; i++)
            {
                float *point = &r_points[(*i) * 3];
                int pos = (point[0] > root.x_c) ? 0 : 1;
                pos |= (point[1] > root.y_c) ? 0 : 2;
                pos |= (point[2] > root.z_c) ? 0 : 4;
                root.subtree[pos].incl.push_back((*i));
            }
            root.incl.clear();
            for (int i = 0; i < 8; i++)
            {
                if (root.subtree[i].depth > 9 || root.subtree[i].incl.size() <= 1)
                {
                    root.subtree[i].pos = -1; // mark as a leaf node
                }
                else
                {
                    root.subtree[i] = build(root.subtree[i].incl.begin(), root.subtree[i].incl.end(), depth + 1);
                    root.subtree[i].incl.clear();
                    root.subtree[i].pos = i;
                }
            }
            return root;
        }
        /**
         * @brief Query function
         *
         * @param root current subtree root
         * @param s_point the query point
         * @param ans current best answer
         * @return std::pair<float, int>
         */
        std::pair<float, int> ask(Node &root, float *s_point, std::pair<float, int> ans = {INFINITY, 0})
        {
            if (root.pos == -1 && root.incl.size() == 0)
                return ans; // empty node
            std::pair<float, int> localAns = ans;
            if (root.incl.size() == 0)
            { // internal (non-leaf) node
                std::pair<float, int> tmp(INFINITY, 0);
                int pos = (s_point[0] > root.x_c) ? 0 : 1;
                pos |= (s_point[1] > root.y_c) ? 0 : 2;
                pos |= (s_point[2] > root.z_c) ? 0 : 4;
                tmp = ask(root.subtree[pos], s_point, localAns);        // recurse down the tree
                localAns = tmp.first > localAns.first ? localAns : tmp; // keep the smaller one
                Node *rt = &(root.subtree[pos ^ 4]);
                if (localAns.first > std::min(std::abs(s_point[2] - rt->z_c - rt->radius), std::abs(s_point[2] - rt->z_c + rt->radius)))
                {
                    tmp = ask(*rt, s_point, localAns);
                    localAns = tmp.first > localAns.first ? localAns : tmp;
                }
                rt = &(root.subtree[pos ^ 2]);
                if (localAns.first > std::min(std::abs(s_point[1] - rt->y_c - rt->radius), std::abs(s_point[1] - rt->y_c + rt->radius)))
                {
                    tmp = ask(*rt, s_point, localAns);
                    localAns = tmp.first > localAns.first ? localAns : tmp;
                }
                rt = &(root.subtree[pos ^ 1]);
                if (localAns.first > std::min(std::abs(s_point[0] - rt->x_c - rt->radius), std::abs(s_point[0] - rt->x_c + rt->radius)))
                {
                    tmp = ask(*rt, s_point, localAns);
                    localAns = tmp.first > localAns.first ? localAns : tmp;
                }
            }
            // non-empty leaf node
            for (std::vector<int>::iterator i = root.incl.begin(); i != root.incl.end(); i++)
            {
                float *r_point = &r_points[(*i) * 3]; // stride 3: flattened [n][3] layout
                float dis = std::pow((r_point[0] - s_point[0]), 2);
                dis += std::pow((r_point[1] - s_point[1]), 2);
                dis += std::pow((r_point[2] - s_point[2]), 2);
                if (dis < localAns.first)
                {
                    localAns.first = dis;
                    localAns.second = *i;
                }
            }
            return localAns;
        }
    };
    extern void cudaCall(
        int k,           // spatial dimension
        int m,           // number of query points
        int n,           // number of reference points
        float *s_points, // query point set
        float *r_points, // reference point set
        int **results    // nearest-neighbor result set
    )
    {
        v12::k = k;
        if (k != 3)
        {
            return v0::cudaCall(k, m, n, s_points, r_points, results);
        }
        v12::r_points = r_points;
        v12::s_points = s_points;
        long sta, end;
        sta = getTime();
        v12::ocTree bt(n);
        end = getTime();
        printf("---search on ocTree: --- ");
        printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6);
        *results = (int *)malloc(sizeof(int) * m);
        int thread = std::min(m, omp_get_max_threads());
#pragma omp parallel for num_threads(thread)
        for (int i = 0; i < m; i++)
            (*results)[i] = bt.ask(bt.treeRoot, &s_points[i * k]).second;
    }
}
// GPU: ocTree
namespace v13
{
    int k;
    float *r_points, *s_points;
    struct Node
    {
        thrust::host_vector<int> incl;     // indices of the points contained in this node
        float x_c, y_c, z_c;               // center of the octree cell
        float radius;                      // half edge length of the cell
        int pos;                           // child label: 0 means all coordinates above the center; bit 1 set means x below, bit 2 means y below, bit 4 means z below; -1 means unused/leaf
        thrust::host_vector<Node> subtree; // children
        int depth;                         // node depth
        Node(float x, float y, float z, float r, int position = -1)
        {
            x_c = x;
            y_c = y;
            z_c = z;
            radius = r;
            pos = position;
        }
        Node &operator=(const Node &o)
        {
            x_c = o.x_c;
            y_c = o.y_c;
            z_c = o.z_c;
            pos = o.pos;
            incl = o.incl;
            subtree = o.subtree;
            return *this;
        }
        Node() { depth = 0; }
        Node(int dep)
        {
            depth = dep;
            pos = -1;
        }
        void setC(float x, float y, float z, float r)
        {
            x_c = x;
            y_c = y;
            z_c = z;
            radius = r;
        }
    };
    /**
     * @brief Query function on the GPU
     *
     * @param root current subtree root
     * @param s_point the query point
     * @param ans current best answer
     * @param r_points reference point set
     * @param rt position of the root within the node set
     * @return thrust::pair<float, int>
     */
    __device__ __host__ thrust::pair<float, int> ask_device(
        Node root = Node(0),
        float *s_point = nullptr,
        thrust::pair<float, int> ans = {INFINITY, 0},
        float *r_points = nullptr,
        int rt = 1)
    {
        if (root.pos == -1 && root.incl.size() == 0)
            return ans; // empty node
        thrust::pair<float, int> localAns = ans;
        if (root.incl.size() == 0)
        { // internal (non-leaf) node
            thrust::pair<float, int> tmp(INFINITY, 0);
            int pos = (s_point[0] > root.x_c) ? 0 : 1;
            pos |= (s_point[1] > root.y_c) ? 0 : 2;
            pos |= (s_point[2] > root.z_c) ? 0 : 4;
            tmp = ask_device(root.subtree[pos], s_point, localAns);    // recurse down the tree
            localAns = tmp.first > localAns.first ? localAns : tmp;    // keep the smaller one
            Node *rt = &(root.subtree[pos ^ 4]);
            if (localAns.first > thrust::min(std::abs(s_point[2] - rt->z_c - rt->radius), std::abs(s_point[2] - rt->z_c + rt->radius)))
            {
                tmp = ask_device(*rt, s_point, localAns);
                localAns = tmp.first > localAns.first ? localAns : tmp;
            }
            rt = &(root.subtree[pos ^ 2]);
            if (localAns.first > thrust::min(std::abs(s_point[1] - rt->y_c - rt->radius), std::abs(s_point[1] - rt->y_c + rt->radius)))
            {
                tmp = ask_device(*rt, s_point, localAns);
                localAns = tmp.first > localAns.first ? localAns : tmp;
            }
            rt = &(root.subtree[pos ^ 1]);
            if (localAns.first > thrust::min(std::abs(s_point[0] - rt->x_c - rt->radius), std::abs(s_point[0] - rt->x_c + rt->radius)))
            {
                tmp = ask_device(*rt, s_point, localAns);
                localAns = tmp.first > localAns.first ? localAns : tmp;
            }
        }
        // non-empty leaf node
        for (thrust::host_vector<int>::iterator i = root.incl.begin(); i != root.incl.end(); i++)
        {
            float *r_point = &r_points[(*i) * 3]; // stride 3: flattened [n][3] layout
            float dis = std::pow((r_point[0] - s_point[0]), 2);
            dis += std::pow((r_point[1] - s_point[1]), 2);
            dis += std::pow((r_point[2] - s_point[2]), 2);
            if (dis < localAns.first)
            {
                localAns.first = dis;
                localAns.second = *i;
            }
        }
        return localAns;
    }
    /**
     * @brief CUDA query kernel
     *
     * @param root current tree root
     * @param s_point query points
     * @param r_points reference point set
     * @param m number of query points
     * @param results output buffer for the results
     */
    __global__ void range_ask_kernel(
        Node root = Node(0),
        float *s_point = nullptr,
        float *r_points = nullptr,
        int m = 0,
        int *results = nullptr)
    {
        int global_id = blockIdx.x * blockDim.x + threadIdx.x;
        if (global_id >= m)
        {
            return;
        }
        // thrust::pair<float, int> ans = {INFINITY, 0};
        // results[global_id] = ask_device(root, s_point, ans, r_points);
    }
    struct ocTreeGPU
    {
        Node treeRoot;
        ocTreeGPU(int n)
        {
            thrust::host_vector<int> se(n);
            for (int i = 0; i < n; i++)
                se[i] = i;
            treeRoot = build(se.begin(), se.end(), 0);
            treeRoot.pos = 0;
        }
        Node build(thrust::host_vector<int>::iterator beg, thrust::host_vector<int>::iterator end, int depth)
        {
            if (beg >= end)
                return Node(depth);
            float x_min = INFINITY, x_max = -x_min,
                  y_min = INFINITY, y_max = -y_min,
                  z_min = INFINITY, z_max = -z_min;
            // find the minimum and maximum of the current point set along the three dimensions
            for (thrust::host_vector<int>::iterator i = beg; i != end; i++)
            {
                float *point = &r_points[(*i) * 3]; // stride 3: flattened [n][3] layout
                x_min = std::min(x_min, point[0]);
                x_max = std::max(x_max, point[0]);
                y_min = std::min(y_min, point[1]);
                y_max = std::max(y_max, point[1]);
                z_min = std::min(z_min, point[2]);
                z_max = std::max(z_max, point[2]);
            }
            float r = thrust::max((x_max - x_min) / 2, thrust::max((y_max - y_min) / 2, (z_max - z_min) / 2));
            Node root(depth);
            root.setC((x_min + x_max) / 2, (y_max + y_min) / 2, (z_max + z_min) / 2, r);
            root.subtree.resize(8, Node(root.depth + 1)); // create the 8 children
            for (thrust::host_vector<int>::iterator i = beg; i != end; i++)
            {
                float *point = &r_points[(*i) * 3];
                int pos = (point[0] > root.x_c) ? 0 : 1;
                pos |= (point[1] > root.y_c) ? 0 : 2;
                pos |= (point[2] > root.z_c) ? 0 : 4;
                root.subtree[pos].incl.push_back((*i));
            }
            root.incl.clear();
            for (int i = 0; i < 8; i++)
            {
                if (root.subtree[i].depth > 9 || root.subtree[i].incl.size() <= 1)
                {
                    root.subtree[i].pos = -1; // mark as a leaf node
                }
                else
                {
                    root.subtree[i] = build(root.subtree[i].incl.begin(), root.subtree[i].incl.end(), depth + 1);
                    root.subtree[i].incl.clear();
                    root.subtree[i].pos = i;
                }
            }
            return root;
        }
        void range_ask(int m, int *results)
        {
            thrust::device_vector<int> result(m);
            int minGridSize, blockSize;
            CHECK(cudaOccupancyMaxPotentialBlockSize(
                &minGridSize,
                &blockSize,
                range_ask_kernel));
            range_ask_kernel<<<divup(m, blockSize), blockSize>>>(
                treeRoot,
                s_points,
                r_points,
                m,
                thrust::raw_pointer_cast(result.data()));
            thrust::copy(result.begin(), result.end(), results);
        }
    };
    extern void cudaCall(
        int k,           // spatial dimension
        int m,           // number of query points
        int n,           // number of reference points
        float *s_points, // query point set
        float *r_points, // reference point set
        int **results    // nearest-neighbor result set
    )
    {
        v13::k = k;
        if (k != 3)
        {
            return v0::cudaCall(k, m, n, s_points, r_points, results);
        }
        v13::r_points = r_points;
        v13::s_points = s_points;
        long sta, end;
        sta = getTime();
        v13::ocTreeGPU bt(n);
        end = getTime();
        printf("---search on ocTree: --- ");
        printf(" %10.3fms to build tree\n", (float)(end - sta) / 1e6);
        *results = (int *)malloc(sizeof(int) * m);
        bt.range_ask(m, *results);
    }
}
struct WarmUP
{
    /**
     * @brief GPU warm-up
     *
     * @param k placeholder, not meaningful
     * @param m placeholder, not meaningful
     * @param n placeholder, not meaningful
     */
    WarmUP(int k, int m, int n)
    {
        float *s_points = (float *)malloc(sizeof(float) * k * m);
        float *r_points = (float *)malloc(sizeof(float) * k * n);
#pragma omp parallel
        {
            unsigned int seed = omp_get_thread_num(); // each thread uses a different random seed
#pragma omp for
            for (int i = 0; i < k * m; ++i)
                s_points[i] = rand_r(&seed) / double(RAND_MAX); // use the thread-safe random number generator
#pragma omp for
            for (int i = 0; i < k * n; ++i)
                r_points[i] = rand_r(&seed) / double(RAND_MAX);
        }
        for (int i = 0; i < 10; ++i)
        {
            int *result;
            v9::cudaCall(k, m, n, s_points, r_points, &result);
            free(result);
        }
        free(s_points);
        free(r_points);
    }
};
static WarmUP warm_up(1, 1, 1 << 15);
796220f897e063c5d9b48245b538bd2bd4e5874e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } /*int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } printf("Press any key to finish"); getchar(); return 0; }*/ // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
796220f897e063c5d9b48245b538bd2bd4e5874e.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } /*int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } printf("Press any key to finish"); getchar(); return 0; }*/ // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
7602654d29e3fefced4cef34ae3cae09470463a4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Babak Poursartip // 02/28/2021 // CUDA //topic: event- measure the time in CUDA. #include <iostream> #include "sm_20_atomic_functions.h" // ============================== __device__ int dSum=0; __global__ void sumArray(int* d) { int tid = threadIdx.x; dSum +=d[tid]; } // ============================== __global__ void sumArrayAtomic(int* d) { int tid = threadIdx.x; atomicAdd(&dSum, d[tid]); } // ============================== int main() { printf(" starts \n"); const int count = 256; const int size = count * sizeof(int); int h[count]; for (int i = 0; i < count; ++i) h[i] = i+1; int* d; hipMalloc(&d, size); hipMemcpy(d,h, size, hipMemcpyHostToDevice); int Sum;hipLaunchKernelGGL(( sumArray), dim3(1), dim3(count), 0, 0, d); hipMemcpyFromSymbol(&Sum, dSum, sizeof(int)); std::cout << " sum is: " << Sum << std::endl; // adding the events to this kernel <<<<<<<<<<<<<<<<<<<<<<<<< hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start); hipLaunchKernelGGL(( sumArrayAtomic), dim3(1), dim3(count), 0, 0, d); hipEventRecord(end); hipEventSynchronize(end); float elapsed; hipEventElapsedTime(&elapsed, start, end); // converts the event into milisecond. // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< hipMemcpyFromSymbol(&Sum, dSum, sizeof(int)); std::cout << " sum is: " << Sum << std::endl; std::cout << " The operation took: " << elapsed << "ms\n"; hipFree(d); printf(" done \n"); return 0; }
7602654d29e3fefced4cef34ae3cae09470463a4.cu
// Babak Poursartip // 02/28/2021 // CUDA //topic: event- measure the time in CUDA. #include <iostream> #include "sm_20_atomic_functions.h" // ============================== __device__ int dSum=0; __global__ void sumArray(int* d) { int tid = threadIdx.x; dSum +=d[tid]; } // ============================== __global__ void sumArrayAtomic(int* d) { int tid = threadIdx.x; atomicAdd(&dSum, d[tid]); } // ============================== int main() { printf(" starts \n"); const int count = 256; const int size = count * sizeof(int); int h[count]; for (int i = 0; i < count; ++i) h[i] = i+1; int* d; cudaMalloc(&d, size); cudaMemcpy(d,h, size, cudaMemcpyHostToDevice); int Sum; sumArray<<<1, count>>>(d); cudaMemcpyFromSymbol(&Sum, dSum, sizeof(int)); std::cout << " sum is: " << Sum << std::endl; // adding the events to this kernel <<<<<<<<<<<<<<<<<<<<<<<<< cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); sumArrayAtomic<<<1, count>>>(d); cudaEventRecord(end); cudaEventSynchronize(end); float elapsed; cudaEventElapsedTime(&elapsed, start, end); // converts the event into milisecond. // <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< cudaMemcpyFromSymbol(&Sum, dSum, sizeof(int)); std::cout << " sum is: " << Sum << std::endl; std::cout << " The operation took: " << elapsed << "ms\n"; cudaFree(d); printf(" done \n"); return 0; }
67b97ff4743d83e06dab4841e9042b55b8e64e48.hip
// !!! This is a file automatically generated by hipify!!! #include<stdio.h> #include"gpu.hpp" #include"io.h" #include"cuda_mpi_routines.h" // #define PRINT_DEVICE_IDS /*! \fn int initialize_cuda_mpi(int myid, int nprocs); * \brief CUDA initialization within MPI. */ int initialize_cuda_mpi(int myid, int nprocs) { int i_device = 0; //GPU device for this process int n_device; //number of GPU devices available hipError_t flag_error; //get the number of cuda devices flag_error = hipGetDeviceCount(&n_device); //check for errors if(flag_error!=hipSuccess) { if(flag_error==hipErrorNoDevice) fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorNoDevice\n",myid,n_device); if(flag_error==hipErrorInsufficientDriver) fprintf(stderr,"hipGetDeviceCount: Error! for myid = %d and n_device = %d; hipErrorInsufficientDriver\n",myid,n_device); fflush(stderr); return 1; } //get host name char pname[MPI_MAX_PROCESSOR_NAME]; //node hostname int pname_length; //length of node hostname MPI_Get_processor_name(pname, &pname_length); //set a cuda device for each process hipSetDevice(myid%n_device); //double check hipGetDevice(&i_device); #ifdef PRINT_DEVICE_IDS printf("In initialize_cuda_mpi: name:%s myid = %d, i_device = %d, n_device = %d\n",pname, myid,i_device,n_device); fflush(stdout); MPI_Barrier(world); #endif return 0; }
67b97ff4743d83e06dab4841e9042b55b8e64e48.cu
#include<stdio.h> #include"gpu.hpp" #include"io.h" #include"cuda_mpi_routines.h" // #define PRINT_DEVICE_IDS /*! \fn int initialize_cuda_mpi(int myid, int nprocs); * \brief CUDA initialization within MPI. */ int initialize_cuda_mpi(int myid, int nprocs) { int i_device = 0; //GPU device for this process int n_device; //number of GPU devices available cudaError_t flag_error; //get the number of cuda devices flag_error = cudaGetDeviceCount(&n_device); //check for errors if(flag_error!=cudaSuccess) { if(flag_error==cudaErrorNoDevice) fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorNoDevice\n",myid,n_device); if(flag_error==cudaErrorInsufficientDriver) fprintf(stderr,"cudaGetDeviceCount: Error! for myid = %d and n_device = %d; cudaErrorInsufficientDriver\n",myid,n_device); fflush(stderr); return 1; } //get host name char pname[MPI_MAX_PROCESSOR_NAME]; //node hostname int pname_length; //length of node hostname MPI_Get_processor_name(pname, &pname_length); //set a cuda device for each process cudaSetDevice(myid%n_device); //double check cudaGetDevice(&i_device); #ifdef PRINT_DEVICE_IDS printf("In initialize_cuda_mpi: name:%s myid = %d, i_device = %d, n_device = %d\n",pname, myid,i_device,n_device); fflush(stdout); MPI_Barrier(world); #endif return 0; }
fd154f45a8e8e0193fc27931d33d48a67bc4b249.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> // sets of fg/bg pixels in python not sorted, so slightly different result // here, total sum over array should be identical __device__ void _fillConsensusArray4( unsigned idx, unsigned idy, unsigned idz, const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); // ignore border pixels if ((idx < (DATAXSIZE-PSXH)) && (idy < (DATAYSIZE-PSYH)) && (idz < (DATAZSIZE-PSZH)) && (idx >= (PSXH)) && (idy >= (PSYH)) && (idz >= (PSZH))){ // only if pixel in foreground if(inPred[mid][idz][idy][idx] <= TH) return; // for all pairs of pixels in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { // offset in patch pixel 1 int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // first element of pair should have high affinity // (to not count every pair twice) float v1 = inPred[po1][idz][idy][idx]; if(v1 <= TH) { continue; } // check if predicted affinity in patch agrees // with corresponding pixel in fg prediction const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } if(inOverlap[z1][y1][x1] != 0){ continue; } // second element of pixel pair for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { // offset in patch pixel 2 int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if (po1 == po2) continue; const int z2 = idz+pz2-PSZH; const int y2 = idy+py2-PSYH; const int x2 = idx+px2-PSXH; // patch pixel should correspond to foreground if(inPred[mid][z2][y2][x2] <= TH) { continue; } if(inOverlap[z2][y2][x2] != 0){ continue; } float v2 = inPred[po2][idz][idy][idx]; // offset from pixel 1 to pixel 2 int zo = pz2-pz1+PSZ-1; int yo = py2-py1+PSY-1; int xo = px2-px1+PSX-1; // if both high affinity, increase consensus // pixel 1 with offset yo/xo to pixel 2 if(v2 > TH) { if(po2 <= po1) continue; // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // 1); // float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH); // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // v3); atomicAdd( &outConsCnt[zo][yo][xo][z1][y1][x1], 1); } // if one foreground/one background, // decrease consensus else if(v2 < TH) { // reverse order if pixel 2 before pixel1 if(po2 <= po1) { zo = pz1-pz2; zo += PSZ-1; yo = py1-py2; yo += PSY-1; xo = px1-px2; xo += PSX-1; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -1); // float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // v3 = v3*4/3; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -v3); atomicAdd( &outConsCnt[zo][yo][xo][z2][y2][x2], 1); } else { // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -1); // v3 = v3*4/3; // float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -v3); atomicAdd( &outConsCnt[zo][yo][xo][z1][y1][x1], 1); } } } } } } } } } } // device function to set the 3D volume __global__ void fillConsensusArray_allPatches4( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { // pixel for this thread: idz, idy, idx unsigned idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned idy = 
blockIdx.y*blockDim.y + threadIdx.y; unsigned idz = blockIdx.z*blockDim.z + threadIdx.z; //unsigned idz = 0; _fillConsensusArray4(idx, idy, idz, inPred, inOverlap, outConsCnt); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } // device function to set the 3D volume __global__ void fillConsensusArray_subsetPatches4( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE], const unsigned patchesIDs[], const uint64_t numPatches) { unsigned id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= numPatches) return; int idz = patchesIDs[id*3+0]; int idy = patchesIDs[id*3+1]; int idx = patchesIDs[id*3+2]; _fillConsensusArray4(idx, idy, idz, inPred, inOverlap, outConsCnt); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } #ifdef MAIN_FILLCONSENSUS #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *outConsensusGPU = allocInitConsensus(); computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU); return 0; } #endif
fd154f45a8e8e0193fc27931d33d48a67bc4b249.cu
#include <cstdint> // sets of fg/bg pixels in python not sorted, so slightly different result // here, total sum over array should be identical __device__ void _fillConsensusArray4( unsigned idx, unsigned idy, unsigned idz, const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); // ignore border pixels if ((idx < (DATAXSIZE-PSXH)) && (idy < (DATAYSIZE-PSYH)) && (idz < (DATAZSIZE-PSZH)) && (idx >= (PSXH)) && (idy >= (PSYH)) && (idz >= (PSZH))){ // only if pixel in foreground if(inPred[mid][idz][idy][idx] <= TH) return; // for all pairs of pixels in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { // offset in patch pixel 1 int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // first element of pair should have high affinity // (to not count every pair twice) float v1 = inPred[po1][idz][idy][idx]; if(v1 <= TH) { continue; } // check if predicted affinity in patch agrees // with corresponding pixel in fg prediction const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } if(inOverlap[z1][y1][x1] != 0){ continue; } // second element of pixel pair for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { // offset in patch pixel 2 int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if (po1 == po2) continue; const int z2 = idz+pz2-PSZH; const int y2 = idy+py2-PSYH; const int x2 = idx+px2-PSXH; // patch pixel should correspond to foreground if(inPred[mid][z2][y2][x2] <= TH) { continue; } if(inOverlap[z2][y2][x2] != 0){ continue; } float v2 = inPred[po2][idz][idy][idx]; // offset from pixel 1 to pixel 2 int zo = pz2-pz1+PSZ-1; int yo = py2-py1+PSY-1; int xo = px2-px1+PSX-1; // if both high affinity, increase consensus // pixel 1 with offset yo/xo to pixel 2 if(v2 > TH) { if(po2 <= po1) continue; // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // 1); // float v3 = (v1*v2 - TH*TH)/(1.0-TH*TH); // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // v3); atomicAdd( &outConsCnt[zo][yo][xo][z1][y1][x1], 1); } // if one foreground/one background, // decrease consensus else if(v2 < TH) { // reverse order if pixel 2 before pixel1 if(po2 <= po1) { zo = pz1-pz2; zo += PSZ-1; yo = py1-py2; yo += PSY-1; xo = px1-px2; xo += PSX-1; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -1); // float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // v3 = v3*4/3; // atomicAdd( // &outCons[zo][yo][xo][z2][y2][x2], // -v3); atomicAdd( &outConsCnt[zo][yo][xo][z2][y2][x2], 1); } else { // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -1); // v3 = v3*4/3; // float v3 = (v1*(1-v2) - TH*TH)/(1.0-TH*TH); // atomicAdd( // &outCons[zo][yo][xo][z1][y1][x1], // -v3); atomicAdd( &outConsCnt[zo][yo][xo][z1][y1][x1], 1); } } } } } } } } } } // device function to set the 3D volume __global__ void fillConsensusArray_allPatches4( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE]) { // pixel for this thread: idz, idy, idx unsigned idx = blockIdx.x*blockDim.x + threadIdx.x; unsigned idy = blockIdx.y*blockDim.y + threadIdx.y; unsigned idz = blockIdx.z*blockDim.z + threadIdx.z; 
//unsigned idz = 0; _fillConsensusArray4(idx, idy, idz, inPred, inOverlap, outConsCnt); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } // device function to set the 3D volume __global__ void fillConsensusArray_subsetPatches4( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const bool inOverlap[DATAZSIZE][DATAYSIZE][DATAXSIZE], float outConsCnt[][NSY][NSX][DATAZSIZE][DATAYSIZE][DATAXSIZE], const unsigned patchesIDs[], const uint64_t numPatches) { unsigned id = blockIdx.x*blockDim.x + threadIdx.x; if(id >= numPatches) return; int idz = patchesIDs[id*3+0]; int idy = patchesIDs[id*3+1]; int idx = patchesIDs[id*3+2]; _fillConsensusArray4(idx, idy, idz, inPred, inOverlap, outConsCnt); // _fillConsensusArray(idx, idy, idz, inPred, outCons); } #ifdef MAIN_FILLCONSENSUS #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *outConsensusGPU = allocInitConsensus(); computeConsensus(consensusFileName, inPredAffinitiesGPU, outConsensusGPU); return 0; } #endif
f1a1f84aba2141d5edddc8690707722b18bd6157.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "hip_runtime.h" //works for real to real and complex interleaved to complex interleaved template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size> __global__ void transpose_kernel_outplace(hipLaunchParm lp, T *input_matrix, T *output_matrix, size_t input_row_size, size_t input_col_size, size_t input_leading_dim_size, size_t output_leading_dim_size, size_t batch_size) { // WG size can be assumed to be 16 by 16 size_t local_idx_0 = hipThreadIdx_x;// 0-15 size_t local_idx_1 = hipThreadIdx_y;// 0-15 size_t block_idx_0 = hipBlockIdx_x;// index of work groups size_t block_idx_1 = hipBlockIdx_y; size_t block_dim_0 = hipBlockDim_x;// size of work groups 16 size_t block_dim_1 = hipBlockDim_y;// size of work groups 16 size_t grid_dim_0 = hipGridDim_x;// number of blocks // for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16) // for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size; const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size; const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2 const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4 __shared__ T lds[macro_tile_row_size][macro_tile_col_size]; size_t batch_idx = 0; size_t blocks_per_batch = grid_dim_0 / batch_size; batch_idx += (block_idx_0) / blocks_per_batch; input_matrix += batch_idx * input_leading_dim_size * input_row_size; size_t input_offset = 0; input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;// each WG works on 64 by 64 block or 32 by 32 input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size; input_matrix += input_offset; for(int i = 0; i < unroll_factor; i++) { //each iteration 256 work items will read from a 4 x 64 subblock //there are 16 iterations size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor); //transpose happened here lds[subblock_idx_0][subblock_idx_1] = input_matrix[subblock_idx_1 * input_leading_dim_size + subblock_idx_0]; } __syncthreads(); output_matrix += batch_idx * input_col_size * output_leading_dim_size; size_t output_offset = 0; output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == ouput_col_size output_offset += block_idx_1 * macro_tile_col_size; output_matrix += output_offset; for(int i = 0; i < unroll_factor; i++) { size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63 T temp = lds[subblock_idx_1][subblock_idx_0]; output_matrix[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = temp;//lds[subblock_idx_1][subblock_idx_0]; } }
f1a1f84aba2141d5edddc8690707722b18bd6157.cu
#include "hip_runtime.h" //works for real to real and complex interleaved to complex interleaved template<typename T, int micro_tile_col_size, int micro_tile_row_size, int wg_col_size, int wg_row_size> __global__ void transpose_kernel_outplace(hipLaunchParm lp, T *input_matrix, T *output_matrix, size_t input_row_size, size_t input_col_size, size_t input_leading_dim_size, size_t output_leading_dim_size, size_t batch_size) { // WG size can be assumed to be 16 by 16 size_t local_idx_0 = hipThreadIdx_x;// 0-15 size_t local_idx_1 = hipThreadIdx_y;// 0-15 size_t block_idx_0 = hipBlockIdx_x;// index of work groups size_t block_idx_1 = hipBlockIdx_y; size_t block_dim_0 = hipBlockDim_x;// size of work groups 16 size_t block_dim_1 = hipBlockDim_y;// size of work groups 16 size_t grid_dim_0 = hipGridDim_x;// number of blocks // for 64 x 64 macro tile size we will need 16 x 4 x 64 blocks (4 x 64 == 16 x 16) // for 32 x 32 macro tile size we will need 4 x 8 x 32 blocks const size_t macro_tile_col_size = micro_tile_col_size * wg_col_size; const size_t macro_tile_row_size = micro_tile_row_size * wg_row_size; const size_t reshape_factor = macro_tile_col_size / block_dim_0; // 64 / 16 = 4 need to fit 4 rows into one row in LDS; 32 / 16 = 2 const size_t unroll_factor = macro_tile_row_size / (block_dim_1 / reshape_factor); // 64 / (16 / 4) = 16; 32 / (16 / 2) = 4 __shared__ T lds[macro_tile_row_size][macro_tile_col_size]; size_t batch_idx = 0; size_t blocks_per_batch = grid_dim_0 / batch_size; batch_idx += (block_idx_0) / blocks_per_batch; input_matrix += batch_idx * input_leading_dim_size * input_row_size; size_t input_offset = 0; input_offset += input_leading_dim_size * block_idx_1 * macro_tile_row_size;// each WG works on 64 by 64 block or 32 by 32 input_offset += (block_idx_0 % blocks_per_batch) * macro_tile_col_size; input_matrix += input_offset; for(int i = 0; i < unroll_factor; i++) { //each iteration 256 work items will read from a 4 x 64 subblock //there are 16 iterations size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0; // local_idx_0 + (local_idx_1 % 4) * 16 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor); //transpose happened here lds[subblock_idx_0][subblock_idx_1] = input_matrix[subblock_idx_1 * input_leading_dim_size + subblock_idx_0]; } __syncthreads(); output_matrix += batch_idx * input_col_size * output_leading_dim_size; size_t output_offset = 0; output_offset += output_leading_dim_size * (block_idx_0 % blocks_per_batch) * macro_tile_row_size;//input_row_size == ouput_col_size output_offset += block_idx_1 * macro_tile_col_size; output_matrix += output_offset; for(int i = 0; i < unroll_factor; i++) { size_t subblock_idx_0 = local_idx_0 + (local_idx_1 % reshape_factor) * block_dim_0;// 0-63 size_t subblock_idx_1 = local_idx_1 / reshape_factor + i * (block_dim_1 / reshape_factor);// 0-3, 4-7 ... 60-63 T temp = lds[subblock_idx_1][subblock_idx_0]; output_matrix[subblock_idx_1 * output_leading_dim_size + subblock_idx_0] = temp;//lds[subblock_idx_1][subblock_idx_0]; } }
62e13f300755462191ee4e229d8b03c46928df1d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_ROCM #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _Swish(const int nthreads, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) / (T(1) + exp(-__ldg(x + i))); #else y[i] = x[i] / (T(1) + exp(-x[i])); #endif } } template <> __global__ void _Swish<half>(const int nthreads, const half* x, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = __float2half( __half2float(__ldg(x + i)) / (1.f + exp(-__half2float(__ldg(x + i))))); #else y[i] = __float2half(__half2float(x[i]) / (1.f + exp(-__half2float(x[i])))); #endif } } template <typename T> __global__ void _SwishGrad(const int nthreads, const T* dy, const T* x, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = dy[i] * (__ldg(y + i) + (T(1) - __ldg(y + i)) / (T(1) + exp(-x[i]))); #else dx[i] = dy[i] * (y[i] + (T(1) - y[i]) / (T(1) + exp(-x[i]))); #endif } } template <> __global__ void _SwishGrad<half>( const int nthreads, const half* dy, const half* x, const half* y, half* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = __float2half( __half2float(dy[i]) * (__half2float(__ldg(y + i)) + (1.f - __half2float(__ldg(y + i))) / (1.f + exp(-__half2float(x[i]))))); #else dx[i] = __float2half( __half2float(dy[i]) * (__half2float(y[i]) + (1.f - __half2float(y[i])) / (1.f + exp(-__half2float(x[i]))))); #endif } } // SwishGrad } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void Swish<float16, CUDAContext>( const int count, const float16* x, float16* y, CUDAContext* ctx) { hipLaunchKernelGGL(( _Swish), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } template <> void SwishGrad<float16, CUDAContext>( const int count, const float16* dy, const float16* x, const float16* y, float16* dx, CUDAContext* ctx) { hipLaunchKernelGGL(( _SwishGrad), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), count, reinterpret_cast<const half*>(dy), reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(dx)); } // SwishGrad #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void Swish<T, CUDAContext>( \ const int count, const T* x, T* y, CUDAContext* ctx) { \ hipLaunchKernelGGL(( _Swish), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ count, x, y); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T) \ template <> \ void SwishGrad<T, CUDAContext>( \ const int count, \ const T* dy, \ const T* x, \ const T* y, \ T* dx, \ CUDAContext* ctx) { \ hipLaunchKernelGGL(( _SwishGrad), dim3(CUDA_BLOCKS(count)), dim3(CUDA_THREADS), 0, ctx->cuda_stream(), \ count, dy, x, y, dx); \ } DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); DEFINE_GRAD_KERNEL_LAUNCHER(float); DEFINE_GRAD_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_ROCM
62e13f300755462191ee4e229d8b03c46928df1d.cu
#ifdef USE_CUDA #include "dragon/core/context_cuda.h" #include "dragon/utils/op_kernels.h" namespace dragon { namespace kernel { namespace { template <typename T> __global__ void _Swish(const int nthreads, const T* x, T* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = __ldg(x + i) / (T(1) + exp(-__ldg(x + i))); #else y[i] = x[i] / (T(1) + exp(-x[i])); #endif } } template <> __global__ void _Swish<half>(const int nthreads, const half* x, half* y) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 y[i] = __float2half( __half2float(__ldg(x + i)) / (1.f + exp(-__half2float(__ldg(x + i))))); #else y[i] = __float2half(__half2float(x[i]) / (1.f + exp(-__half2float(x[i])))); #endif } } template <typename T> __global__ void _SwishGrad(const int nthreads, const T* dy, const T* x, const T* y, T* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = dy[i] * (__ldg(y + i) + (T(1) - __ldg(y + i)) / (T(1) + exp(-x[i]))); #else dx[i] = dy[i] * (y[i] + (T(1) - y[i]) / (T(1) + exp(-x[i]))); #endif } } template <> __global__ void _SwishGrad<half>( const int nthreads, const half* dy, const half* x, const half* y, half* dx) { CUDA_1D_KERNEL_LOOP(i, nthreads) { #if __CUDA_ARCH__ >= 350 dx[i] = __float2half( __half2float(dy[i]) * (__half2float(__ldg(y + i)) + (1.f - __half2float(__ldg(y + i))) / (1.f + exp(-__half2float(x[i]))))); #else dx[i] = __float2half( __half2float(dy[i]) * (__half2float(y[i]) + (1.f - __half2float(y[i])) / (1.f + exp(-__half2float(x[i]))))); #endif } } // SwishGrad } // namespace /* ------------------- Launcher Separator ------------------- */ template <> void Swish<float16, CUDAContext>( const int count, const float16* x, float16* y, CUDAContext* ctx) { _Swish<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count, reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } template <> void SwishGrad<float16, CUDAContext>( const int count, const float16* dy, const float16* x, const float16* y, float16* dx, CUDAContext* ctx) { _SwishGrad<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( count, reinterpret_cast<const half*>(dy), reinterpret_cast<const half*>(x), reinterpret_cast<const half*>(y), reinterpret_cast<half*>(dx)); } // SwishGrad #define DEFINE_KERNEL_LAUNCHER(T) \ template <> \ void Swish<T, CUDAContext>( \ const int count, const T* x, T* y, CUDAContext* ctx) { \ _Swish<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ count, x, y); \ } #define DEFINE_GRAD_KERNEL_LAUNCHER(T) \ template <> \ void SwishGrad<T, CUDAContext>( \ const int count, \ const T* dy, \ const T* x, \ const T* y, \ T* dx, \ CUDAContext* ctx) { \ _SwishGrad<<<CUDA_BLOCKS(count), CUDA_THREADS, 0, ctx->cuda_stream()>>>( \ count, dy, x, y, dx); \ } DEFINE_KERNEL_LAUNCHER(float); DEFINE_KERNEL_LAUNCHER(double); DEFINE_GRAD_KERNEL_LAUNCHER(float); DEFINE_GRAD_KERNEL_LAUNCHER(double); #undef DEFINE_KERNEL_LAUNCHER #undef DEFINE_GRAD_KERNEL_LAUNCHER } // namespace kernel } // namespace dragon #endif // USE_CUDA
d9081954f1894277da0f962f8c68eb2cc889c951.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under // the License. // // // // // // // // // // // Authors: Aster JIAN ([email protected]) // Yzx ([email protected]) // Ao LI ([email protected]) // Paul LU ([email protected]) #include <cassert> #include <cstring> #include <vector> #include "NvInfer.h" #include "emb_layer_norm_plugin.h" #include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h" using namespace nvinfer1; namespace fwd { namespace bert { __global__ void fillSBSMaskKernel(const uint32_t warps_m, const uint32_t warps_n, const uint32_t S, const int* inputMaskSB, uint32_t* inputMaskX) { extern __shared__ int shm_mask[]; // S mask elements of this batch const size_t xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const uint32_t threads_per_cta = blockDim.x; const uint32_t xmmas_m = gridDim.x; const uint32_t B = gridDim.y; const uint32_t mi = blockIdx.x; const uint32_t bi = blockIdx.y; const uint32_t tidx = threadIdx.x; const size_t warp = tidx / 32; const size_t warp_n = warp / warps_m; const size_t lane = tidx % 32; const size_t col = warp_n * 16 + lane % 4 * 2; // load the mask corresponding to one batch for (uint32_t si = tidx; si < S; si += threads_per_cta) { // not coalesced to conform to current input format: SxB shm_mask[si] = inputMaskSB[si * B + bi]; } __syncthreads(); uint32_t mask = 0u; for (size_t ni = 0; ni < xmmas_n; ++ni) { const int offset = ni * 16 * warps_n + col; mask |= (shm_mask[offset + 0] == 1.f ? 1u : 0u) << (8 * ni + 0); mask |= (shm_mask[offset + 1] == 1.f ? 1u : 0u) << (8 * ni + 1); mask |= (shm_mask[offset + 0] == 1.f ? 1u : 0u) << (8 * ni + 2); mask |= (shm_mask[offset + 1] == 1.f ? 1u : 0u) << (8 * ni + 3); mask |= (shm_mask[offset + 8] == 1.f ? 1u : 0u) << (8 * ni + 4); mask |= (shm_mask[offset + 9] == 1.f ? 1u : 0u) << (8 * ni + 5); mask |= (shm_mask[offset + 8] == 1.f ? 1u : 0u) << (8 * ni + 6); mask |= (shm_mask[offset + 9] == 1.f ? 
1u : 0u) << (8 * ni + 7); } inputMaskX[(bi * xmmas_m + mi) * threads_per_cta + tidx] = mask; } void convertMask(const uint32_t S, const uint32_t B, const uint32_t warps_m, const uint32_t warps_n, const uint32_t warps_k, const int* inputMaskSB, uint32_t* inputMaskX, hipStream_t stream) { const size_t xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); const size_t threads_per_cta = warps_m * warps_n * warps_k * 32; dim3 grid(xmmas_m, B); hipLaunchKernelGGL(( fillSBSMaskKernel), dim3(grid), dim3(threads_per_cta), S * sizeof(int), stream, warps_m, warps_n, S, inputMaskSB, inputMaskX); CUDA_CHECK(hipPeekAtLastError()); } template <unsigned TPB> __global__ void maskIdxKernelSmall(int ld, const int* mask, int* maskIdx) { using BlockReduce = hipcub::BlockReduce<int, TPB>; __shared__ typename BlockReduce::TempStorage tmpStorage; hipcub::Min min; int threadData(ld); // if the mask admits all values if (threadIdx.x < ld) { // mask has input dims {S, B} and gridDims.x is B const int idx = threadIdx.x * gridDim.x + blockIdx.x; const int val = mask[idx]; if (val == 0) // masked position: report thread idx { threadData = threadIdx.x; } } const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); if (threadIdx.x == 0) { maskIdx[blockIdx.x] = minIdx; } } template <unsigned TPB> __global__ void maskIdxKernel(int ld, const int* mask, int* maskIdx) { using BlockReduce = hipcub::BlockReduce<int, TPB>; __shared__ typename BlockReduce::TempStorage tmpStorage; hipcub::Min min; int threadData(ld); // if the mask admits all values for (int i = threadIdx.x; i < ld; i += TPB) { // mask has input dims {S, B} and gridDims.x is B const int idx = i * gridDim.x + blockIdx.x; const int val = mask[idx]; if (val == 0) // masked position: report thread idx { threadData = min(threadData, i); } } const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); if (threadIdx.x == 0) { maskIdx[blockIdx.x] = minIdx; } } int computeMaskIdx(hipStream_t stream, const int S, const int B, const int* mask, int* maskIdx) { // Mask idx is of length B and assumes the valid region is contiguous starting // from the beginning of the sequence // Assume n = BxS if (S <= 32) { hipLaunchKernelGGL(( maskIdxKernelSmall<32>), dim3(B), dim3(32), 0, stream, S, mask, maskIdx); } else if (S <= 128) { hipLaunchKernelGGL(( maskIdxKernelSmall<128>), dim3(B), dim3(128), 0, stream, S, mask, maskIdx); } else if (S == 384) { hipLaunchKernelGGL(( maskIdxKernelSmall<384>), dim3(B), dim3(384), 0, stream, S, mask, maskIdx); } else { hipLaunchKernelGGL(( maskIdxKernel<256>), dim3(B), dim3(256), 0, stream, S, mask, maskIdx); } CUDA_CHECK(hipPeekAtLastError()); return 0; } template <typename T, unsigned TPB> __global__ void embLayerNormKernel(int ld, const int* inputIds, const int* tokenIds, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) { hipcub::Sum pairSum; // 1. lookup word and token of the block // blockIdx.x = position in the sequence // blockIdx.y = batch // gridDim.x = S // gridDim.y = B __shared__ int wordId; __shared__ int tokenId; const T rld = T(1.f) / T(ld); const int seqPos = blockIdx.y + blockIdx.x * gridDim.y; if (threadIdx.x == 0) { wordId = inputIds[seqPos]; tokenId = tokenIds[seqPos]; } __syncthreads(); // 2. 
load pos/tok/word embeddings and add them together // offset into embeddings is given by wordId * hidden_size const int poffset = blockIdx.x * ld; const int woffset = wordId * ld; const int toffset = tokenId * ld; // the output offset is given by b * (S*hidden_size) + s * hidden_size const int outOffset = seqPos * ld; kvp<T> threadData(0, 0); for (int it = threadIdx.x; it < ld; it += TPB) { const T w(wordEmb[woffset + it]); const T t(tokEmb[toffset + it]); const T p(posEmb[poffset + it]); const T val = w + t + p; output[outOffset + it] = val; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); } // 3. layer norm on the sum layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output); } template <typename T> int embSkipLayerNorm(hipStream_t stream, int ld, int B, int S, const int* inputIds, const int* token_ids, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) { constexpr int tpb = 256; const dim3 grid(S, B, 1); const dim3 block(tpb, 1, 1); hipLaunchKernelGGL(( embLayerNormKernel<T, tpb>), dim3(grid), dim3(block), 0, stream, ld, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output); CUDA_CHECK(hipPeekAtLastError()); return 0; } template int embSkipLayerNorm<float>(hipStream_t, int, int, int, const int*, const int*, const float*, const float*, const float*, const float*, const float*, float*); template int embSkipLayerNorm<half>(hipStream_t, int, int, int, const int*, const int*, const float*, const float*, const half*, const half*, const half*, half*); } // namespace bert } // namespace fwd
d9081954f1894277da0f962f8c68eb2cc889c951.cu
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under // the License. // // ╔════════════════════════════════════════════════════════════════════════════════════════╗ // ║──█████████╗───███████╗───████████╗───██╗──────██╗───███████╗───████████╗───████████╗───║ // ║──██╔══════╝──██╔════██╗──██╔════██╗──██║──────██║──██╔════██╗──██╔════██╗──██╔════██╗──║ // ║──████████╗───██║────██║──████████╔╝──██║──█╗──██║──█████████║──████████╔╝──██║────██║──║ // ║──██╔═════╝───██║────██║──██╔════██╗──██║█████╗██║──██╔════██║──██╔════██╗──██║────██║──║ // ║──██║─────────╚███████╔╝──██║────██║──╚████╔████╔╝──██║────██║──██║────██║──████████╔╝──║ // ║──╚═╝──────────╚══════╝───╚═╝────╚═╝───╚═══╝╚═══╝───╚═╝────╚═╝──╚═╝────╚═╝──╚═══════╝───║ // ╚════════════════════════════════════════════════════════════════════════════════════════╝ // // Authors: Aster JIAN ([email protected]) // Yzx ([email protected]) // Ao LI ([email protected]) // Paul LU ([email protected]) #include <cassert> #include <cstring> #include <vector> #include "NvInfer.h" #include "emb_layer_norm_plugin.h" #include "trt_engine/trt_network_crt/plugins/common/bert_plugin_util.h" using namespace nvinfer1; namespace fwd { namespace bert { __global__ void fillSBSMaskKernel(const uint32_t warps_m, const uint32_t warps_n, const uint32_t S, const int* inputMaskSB, uint32_t* inputMaskX) { extern __shared__ int shm_mask[]; // S mask elements of this batch const size_t xmmas_n = (S + 16 * warps_n - 1) / (16 * warps_n); const uint32_t threads_per_cta = blockDim.x; const uint32_t xmmas_m = gridDim.x; const uint32_t B = gridDim.y; const uint32_t mi = blockIdx.x; const uint32_t bi = blockIdx.y; const uint32_t tidx = threadIdx.x; const size_t warp = tidx / 32; const size_t warp_n = warp / warps_m; const size_t lane = tidx % 32; const size_t col = warp_n * 16 + lane % 4 * 2; // load the mask corresponding to one batch for (uint32_t si = tidx; si < S; si += threads_per_cta) { // not coalesced to conform to current input format: SxB shm_mask[si] = inputMaskSB[si * B + bi]; } __syncthreads(); uint32_t mask = 0u; for (size_t ni = 0; ni < xmmas_n; ++ni) { const int offset = ni * 16 * warps_n + col; mask |= (shm_mask[offset + 0] == 1.f ? 1u : 0u) << (8 * ni + 0); mask |= (shm_mask[offset + 1] == 1.f ? 1u : 0u) << (8 * ni + 1); mask |= (shm_mask[offset + 0] == 1.f ? 1u : 0u) << (8 * ni + 2); mask |= (shm_mask[offset + 1] == 1.f ? 1u : 0u) << (8 * ni + 3); mask |= (shm_mask[offset + 8] == 1.f ? 1u : 0u) << (8 * ni + 4); mask |= (shm_mask[offset + 9] == 1.f ? 1u : 0u) << (8 * ni + 5); mask |= (shm_mask[offset + 8] == 1.f ? 1u : 0u) << (8 * ni + 6); mask |= (shm_mask[offset + 9] == 1.f ? 
1u : 0u) << (8 * ni + 7); } inputMaskX[(bi * xmmas_m + mi) * threads_per_cta + tidx] = mask; } void convertMask(const uint32_t S, const uint32_t B, const uint32_t warps_m, const uint32_t warps_n, const uint32_t warps_k, const int* inputMaskSB, uint32_t* inputMaskX, cudaStream_t stream) { const size_t xmmas_m = (S + 16 * warps_m - 1) / (16 * warps_m); const size_t threads_per_cta = warps_m * warps_n * warps_k * 32; dim3 grid(xmmas_m, B); fillSBSMaskKernel<<<grid, threads_per_cta, S * sizeof(int), stream>>>(warps_m, warps_n, S, inputMaskSB, inputMaskX); CUDA_CHECK(cudaPeekAtLastError()); } template <unsigned TPB> __global__ void maskIdxKernelSmall(int ld, const int* mask, int* maskIdx) { using BlockReduce = cub::BlockReduce<int, TPB>; __shared__ typename BlockReduce::TempStorage tmpStorage; cub::Min min; int threadData(ld); // if the mask admits all values if (threadIdx.x < ld) { // mask has input dims {S, B} and gridDims.x is B const int idx = threadIdx.x * gridDim.x + blockIdx.x; const int val = mask[idx]; if (val == 0) // masked position: report thread idx { threadData = threadIdx.x; } } const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); if (threadIdx.x == 0) { maskIdx[blockIdx.x] = minIdx; } } template <unsigned TPB> __global__ void maskIdxKernel(int ld, const int* mask, int* maskIdx) { using BlockReduce = cub::BlockReduce<int, TPB>; __shared__ typename BlockReduce::TempStorage tmpStorage; cub::Min min; int threadData(ld); // if the mask admits all values for (int i = threadIdx.x; i < ld; i += TPB) { // mask has input dims {S, B} and gridDims.x is B const int idx = i * gridDim.x + blockIdx.x; const int val = mask[idx]; if (val == 0) // masked position: report thread idx { threadData = min(threadData, i); } } const auto minIdx = BlockReduce(tmpStorage).Reduce(threadData, min); if (threadIdx.x == 0) { maskIdx[blockIdx.x] = minIdx; } } int computeMaskIdx(cudaStream_t stream, const int S, const int B, const int* mask, int* maskIdx) { // Mask idx is of length B and assumes the valid region is contiguous starting // from the beginning of the sequence // Assume n = BxS if (S <= 32) { maskIdxKernelSmall<32><<<B, 32, 0, stream>>>(S, mask, maskIdx); } else if (S <= 128) { maskIdxKernelSmall<128><<<B, 128, 0, stream>>>(S, mask, maskIdx); } else if (S == 384) { maskIdxKernelSmall<384><<<B, 384, 0, stream>>>(S, mask, maskIdx); } else { maskIdxKernel<256><<<B, 256, 0, stream>>>(S, mask, maskIdx); } CUDA_CHECK(cudaPeekAtLastError()); return 0; } template <typename T, unsigned TPB> __global__ void embLayerNormKernel(int ld, const int* inputIds, const int* tokenIds, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) { cub::Sum pairSum; // 1. lookup word and token of the block // blockIdx.x = position in the sequence // blockIdx.y = batch // gridDim.x = S // gridDim.y = B __shared__ int wordId; __shared__ int tokenId; const T rld = T(1.f) / T(ld); const int seqPos = blockIdx.y + blockIdx.x * gridDim.y; if (threadIdx.x == 0) { wordId = inputIds[seqPos]; tokenId = tokenIds[seqPos]; } __syncthreads(); // 2. 
load pos/tok/word embeddings and add them together // offset into embeddings is given by wordId * hidden_size const int poffset = blockIdx.x * ld; const int woffset = wordId * ld; const int toffset = tokenId * ld; // the output offset is given by b * (S*hidden_size) + s * hidden_size const int outOffset = seqPos * ld; kvp<T> threadData(0, 0); for (int it = threadIdx.x; it < ld; it += TPB) { const T w(wordEmb[woffset + it]); const T t(tokEmb[toffset + it]); const T p(posEmb[poffset + it]); const T val = w + t + p; output[outOffset + it] = val; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); } // 3. layer norm on the sum layerNorm<T, T, float, TPB>(threadData, ld, outOffset, beta, gamma, output); } template <typename T> int embSkipLayerNorm(cudaStream_t stream, int ld, int B, int S, const int* inputIds, const int* token_ids, const float* beta, const float* gamma, const T* wordEmb, const T* posEmb, const T* tokEmb, T* output) { constexpr int tpb = 256; const dim3 grid(S, B, 1); const dim3 block(tpb, 1, 1); embLayerNormKernel<T, tpb><<<grid, block, 0, stream>>>(ld, inputIds, token_ids, beta, gamma, wordEmb, posEmb, tokEmb, output); CUDA_CHECK(cudaPeekAtLastError()); return 0; } template int embSkipLayerNorm<float>(cudaStream_t, int, int, int, const int*, const int*, const float*, const float*, const float*, const float*, const float*, float*); template int embSkipLayerNorm<half>(cudaStream_t, int, int, int, const int*, const int*, const float*, const float*, const half*, const half*, const half*, half*); } // namespace bert } // namespace fwd
19c86ca8762f5be6246764b1b5629536c7847557.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements ClusterHelper::set.

#include "dca/phys/dca_step/cluster_solver/shared_tools/cluster_helper.cuh"

#include <mutex>
#include <array>

#include "dca/platform/dca_gpu.h"
#include "dca/linalg/util/allocators/vectors_typedefs.hpp"

namespace dca {
namespace phys {
namespace solver {
namespace details {
// dca::phys::solver::details::

__device__ __constant__ ClusterHelper cluster_real_helper;
__device__ __constant__ ClusterHelper cluster_momentum_helper;

void ClusterHelper::set(int nc, const int* add, int lda, const int* sub, int lds, bool momentum) {
  static std::array<std::once_flag, 2> flags;

  std::call_once(flags[momentum], [=]() {
    ClusterHelper host_helper;
    host_helper.nc_ = nc;

    auto compact_transfer = [=](const int* matrix, int ldm, int** dest) {
      linalg::util::HostVector<int> compact(nc * nc);
      for (int j = 0; j < nc; ++j)
        for (int i = 0; i < nc; ++i)
          compact[i + nc * j] = matrix[i + ldm * j];

      hipMalloc(dest, sizeof(int) * lds * nc);
      hipMemcpy(*dest, compact.data(), sizeof(int) * nc * nc, hipMemcpyHostToDevice);
    };

    compact_transfer(add, lda, const_cast<int**>(&host_helper.add_matrix_));
    compact_transfer(sub, lds, const_cast<int**>(&host_helper.sub_matrix_));

    if (momentum) {
      hipMemcpyToSymbol(cluster_momentum_helper, &host_helper, sizeof(ClusterHelper));
    }
    else {
      hipMemcpyToSymbol(cluster_real_helper, &host_helper, sizeof(ClusterHelper));
    }
  });
}

}  // namespace details
}  // namespace solver
}  // namespace phys
}  // namespace dca
19c86ca8762f5be6246764b1b5629536c7847557.cu
// Copyright (C) 2018 ETH Zurich
// Copyright (C) 2018 UT-Battelle, LLC
// All rights reserved.
//
// See LICENSE.txt for terms of usage.
// See CITATION.txt for citation guidelines if you use this code for scientific publications.
//
// Author: Giovanni Balduzzi ([email protected])
//
// This file implements ClusterHelper::set.

#include "dca/phys/dca_step/cluster_solver/shared_tools/cluster_helper.cuh"

#include <mutex>
#include <array>

#include "dca/platform/dca_gpu.h"
#include "dca/linalg/util/allocators/vectors_typedefs.hpp"

namespace dca {
namespace phys {
namespace solver {
namespace details {
// dca::phys::solver::details::

__device__ __constant__ ClusterHelper cluster_real_helper;
__device__ __constant__ ClusterHelper cluster_momentum_helper;

void ClusterHelper::set(int nc, const int* add, int lda, const int* sub, int lds, bool momentum) {
  static std::array<std::once_flag, 2> flags;

  std::call_once(flags[momentum], [=]() {
    ClusterHelper host_helper;
    host_helper.nc_ = nc;

    auto compact_transfer = [=](const int* matrix, int ldm, int** dest) {
      linalg::util::HostVector<int> compact(nc * nc);
      for (int j = 0; j < nc; ++j)
        for (int i = 0; i < nc; ++i)
          compact[i + nc * j] = matrix[i + ldm * j];

      cudaMalloc(dest, sizeof(int) * lds * nc);
      cudaMemcpy(*dest, compact.data(), sizeof(int) * nc * nc, cudaMemcpyHostToDevice);
    };

    compact_transfer(add, lda, const_cast<int**>(&host_helper.add_matrix_));
    compact_transfer(sub, lds, const_cast<int**>(&host_helper.sub_matrix_));

    if (momentum) {
      cudaMemcpyToSymbol(cluster_momentum_helper, &host_helper, sizeof(ClusterHelper));
    }
    else {
      cudaMemcpyToSymbol(cluster_real_helper, &host_helper, sizeof(ClusterHelper));
    }
  });
}

}  // namespace details
}  // namespace solver
}  // namespace phys
}  // namespace dca
fd629697b1bc3d89563ff7ac88e1e4a605bf1db2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask, bool use_local_idx) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart0 = ph * stride_h - pad_h; int wstart0 = pw * stride_w - pad_w; const int hend = min(hstart0 + kernel_h, height); const int wend = min(wstart0 + kernel_w, width); int hstart = max(hstart0, 0); int wstart = max(wstart0, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; int stored_maxidx = use_local_idx ? ( (maxidx/width-hstart0)*kernel_w+(maxidx%width-wstart0) ):(maxidx); if (mask) { mask[index] = stored_maxidx; } else { top_mask[index] = static_cast<Dtype>( stored_maxidx ); } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void SumPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = 
ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype sumval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { sumval += bottom_slice[h * width + w]; } } top_data[index] = sumval; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> __global__ void FixPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int fix_x, const int fix_y, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const 
int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int h = hstart + fix_y; int w = wstart + fix_x; if (h<0 || h>=height || w<0 || w>=width) { top_data[index] = Dtype(0); } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; top_data[index] = bottom_slice[h * width + w]; } } } template <typename Dtype> __global__ void SwitchPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* bottom_switch, Dtype* const top_data, bool use_local_idx) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; if (use_local_idx) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int local_index = bottom_switch[index]; int h = hstart+local_index/kernel_w; int w = wstart+local_index%kernel_w; if (h<0 || h>=height || w<0 || w>=width) { top_data[index] = Dtype(0); } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; top_data[index] = bottom_slice[h * width + w]; } } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const int bottom_index = bottom_switch[index]; top_data[index] = bottom_slice[ bottom_index ]; } } } template <typename Dtype> __global__ void SoftSwitchPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* bottom_switch, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart0 = ph * stride_h - pad_h; int wstart0 = pw * stride_w - pad_w; int hend = min(hstart0 + kernel_h, height + pad_h); int wend = min(wstart0 + kernel_w, width + pad_w); int hstart = max(hstart0, 0); int wstart = max(wstart0, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const int pooled_geo_count = pooled_width*pooled_height; const Dtype* switch_slice = bottom_switch + (n*channels+c)*kernel_w*kernel_h*pooled_geo_count; const int slided_index = ph*pooled_width+pw; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int local_index = (h-hstart0)*kernel_w+(w-wstart0); int switch_index = local_index*pooled_geo_count + slided_index; aveval += bottom_slice[h * width + w] * switch_slice[switch_index]; } } top_data[index] = aveval; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = 
bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL ); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_SUM: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SumPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTrain<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolForwardTest<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; case PoolingParameter_PoolMethod_FIX: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( FixPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, fix_x_, fix_y_, top_data); break; case PoolingParameter_PoolMethod_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); hipLaunchKernelGGL(( SwitchPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, top_data, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL); } break; case PoolingParameter_PoolMethod_SOFT_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); hipLaunchKernelGGL(( SoftSwitchPoolForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), 
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights, bool use_local_idx ) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (use_local_idx) { if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { int hstart = ph * stride_h - pad_h; for (int pw = pwstart; pw < pwend; ++pw) { int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); if (mask_slice[ph * pooled_width + pw] == local_index) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { int hstart = ph * stride_h - pad_h; for (int pw = pwstart; pw < pwend; ++pw) { int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); if (top_mask_slice[ph * pooled_width + pw] == local_index) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } } else { if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c 
= (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; bwgt += Dtype(1.)/Dtype(pool_size); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void SumPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); bwgt += (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void FixPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int fix_x, const int fix_y, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // index is the pooled index (it is safe to do it here) const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int h = hstart + fix_y; int w = wstart + fix_x; if (!(h<0 || h>=height || w<0 || w>=width)) { const int depooled_index = (n * channels + c) * height * width + h * width + w; bottom_diff[depooled_index] = top_diff[index]; bweights[depooled_index] = Dtype(1.); } } } template <typename Dtype> __global__ void SoftSwitchPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* const bottom_switch, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const int pooled_geo_count = pooled_height * pooled_width; const int ch_count = n * channels + c; const int kernel_count = kernel_w * kernel_h; const Dtype* const top_diff_slice = top_diff + ch_count * pooled_geo_count; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); int top_sliced_index = ph * pooled_width + pw; int switch_index = (ch_count * kernel_count + local_index) * pooled_geo_count + top_sliced_index; gradient += top_diff_slice[top_sliced_index] * bottom_switch[switch_index]; bwgt += bottom_switch[switch_index]; } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } Dtype* bweights = backward_weights_.mutable_gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_SWITCH: top_mask = bottom[1]->gpu_data(); case PoolingParameter_PoolMethod_MAX: if (!top_mask) { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } } // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( AvePoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_SUM: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SumPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( StoPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_FIX: // NOLINT_NEXT_LINE(whitespace/operators) { const int top_count = top[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); caffe_gpu_set(count, Dtype(0.), bweights); 
hipLaunchKernelGGL(( FixPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top_count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, fix_x_, fix_y_, bottom_diff, bweights); } break; case PoolingParameter_PoolMethod_SOFT_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); hipLaunchKernelGGL(( SoftSwitchPoolBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, bottom_diff, bweights); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
fd629697b1bc3d89563ff7ac88e1e4a605bf1db2.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/pooling_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void MaxPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask, Dtype* top_mask, bool use_local_idx) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart0 = ph * stride_h - pad_h; int wstart0 = pw * stride_w - pad_w; const int hend = min(hstart0 + kernel_h, height); const int wend = min(wstart0 + kernel_w, width); int hstart = max(hstart0, 0); int wstart = max(wstart0, 0); Dtype maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (bottom_slice[h * width + w] > maxval) { maxidx = h * width + w; maxval = bottom_slice[maxidx]; } } } top_data[index] = maxval; int stored_maxidx = use_local_idx ? ( (maxidx/width-hstart0)*kernel_w+(maxidx%width-wstart0) ):(maxidx); if (mask) { mask[index] = stored_maxidx; } else { top_mask[index] = static_cast<Dtype>( stored_maxidx ); } } } template <typename Dtype> __global__ void AvePoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); const int pool_size = (hend - hstart) * (wend - wstart); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { aveval += bottom_slice[h * width + w]; } } top_data[index] = aveval / pool_size; } } template <typename Dtype> __global__ void SumPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + 
kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); hstart = max(hstart, 0); wstart = max(wstart, 0); hend = min(hend, height); wend = min(wend, width); Dtype sumval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { sumval += bottom_slice[h * width + w]; } } top_data[index] = sumval; } } template <typename Dtype> __global__ void StoPoolForwardTrain(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const rand_idx, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); Dtype cumsum = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; } } const float thres = rand_idx[index] * cumsum; // Second pass: get value, and set index. cumsum = 0; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; if (cumsum >= thres) { rand_idx[index] = ((n * channels + c) * height + h) * width + w; top_data[index] = bottom_slice[h * width + w]; return; } } } } } template <typename Dtype> __global__ void StoPoolForwardTest(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; const int hstart = ph * stride_h; const int hend = min(hstart + kernel_h, height); const int wstart = pw * stride_w; const int wend = min(wstart + kernel_w, width); // We set cumsum to be 0 to avoid divide-by-zero problems Dtype cumsum = FLT_MIN; Dtype cumvalues = 0.; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; // First pass: get sum for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { cumsum += bottom_slice[h * width + w]; cumvalues += bottom_slice[h * width + w] * bottom_slice[h * width + w]; } } top_data[index] = cumvalues / cumsum; } } template <typename Dtype> __global__ void FixPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int fix_x, const int fix_y, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % 
pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int h = hstart + fix_y; int w = wstart + fix_x; if (h<0 || h>=height || w<0 || w>=width) { top_data[index] = Dtype(0); } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; top_data[index] = bottom_slice[h * width + w]; } } } template <typename Dtype> __global__ void SwitchPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* bottom_switch, Dtype* const top_data, bool use_local_idx) { CUDA_KERNEL_LOOP(index, nthreads) { const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; if (use_local_idx) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int local_index = bottom_switch[index]; int h = hstart+local_index/kernel_w; int w = wstart+local_index%kernel_w; if (h<0 || h>=height || w<0 || w>=width) { top_data[index] = Dtype(0); } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; top_data[index] = bottom_slice[h * width + w]; } } else { const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const int bottom_index = bottom_switch[index]; top_data[index] = bottom_slice[ bottom_index ]; } } } template <typename Dtype> __global__ void SoftSwitchPoolForward(const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* bottom_switch, Dtype* const top_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart0 = ph * stride_h - pad_h; int wstart0 = pw * stride_w - pad_w; int hend = min(hstart0 + kernel_h, height + pad_h); int wend = min(wstart0 + kernel_w, width + pad_w); int hstart = max(hstart0, 0); int wstart = max(wstart0, 0); hend = min(hend, height); wend = min(wend, width); Dtype aveval = 0; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; const int pooled_geo_count = pooled_width*pooled_height; const Dtype* switch_slice = bottom_switch + (n*channels+c)*kernel_w*kernel_h*pooled_geo_count; const int slided_index = ph*pooled_width+pw; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { int local_index = (h-hstart0)*kernel_w+(w-wstart0); int switch_index = local_index*pooled_geo_count + slided_index; aveval += bottom_slice[h * width + w] * switch_slice[switch_index]; } } top_data[index] = aveval; } } template <typename Dtype> void PoolingLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); int 
count = top[0]->count(); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; int* mask = NULL; Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_MAX: if (use_top_mask) { top_mask = top[1]->mutable_gpu_data(); } else { mask = max_idx_.mutable_gpu_data(); } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data, mask, top_mask, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL ); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_SUM: // NOLINT_NEXT_LINE(whitespace/operators) SumPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data); break; case PoolingParameter_PoolMethod_STOCHASTIC: if (this->phase_ == TRAIN) { // We need to create the random index as well. caffe_gpu_rng_uniform(count, Dtype(0), Dtype(1), rand_idx_.mutable_gpu_data()); // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTrain<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, rand_idx_.mutable_gpu_data(), top_data); } else { // NOLINT_NEXT_LINE(whitespace/operators) StoPoolForwardTest<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, top_data); } break; case PoolingParameter_PoolMethod_FIX: // NOLINT_NEXT_LINE(whitespace/operators) FixPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, fix_x_, fix_y_, top_data); break; case PoolingParameter_PoolMethod_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); SwitchPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, top_data, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL); } break; case PoolingParameter_PoolMethod_SOFT_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); SoftSwitchPoolForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, top_data); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } template <typename Dtype> __global__ void 
MaxPoolBackward(const int nthreads, const Dtype* const top_diff, const int* const mask, const Dtype* const top_mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights, bool use_local_idx ) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; if (use_local_idx) { if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { int hstart = ph * stride_h - pad_h; for (int pw = pwstart; pw < pwend; ++pw) { int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); if (mask_slice[ph * pooled_width + pw] == local_index) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { int hstart = ph * stride_h - pad_h; for (int pw = pwstart; pw < pwend; ++pw) { int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); if (top_mask_slice[ph * pooled_width + pw] == local_index) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } } else { if (mask) { const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } else { const Dtype* const top_mask_slice = top_mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (top_mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } } } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void AvePoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); int pool_size = (hend - hstart) * (wend - wstart); gradient += top_diff_slice[ph * pooled_width + pw] / pool_size; bwgt += Dtype(1.)/Dtype(pool_size); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void SumPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int hend = min(hstart + kernel_h, height + pad_h); int wend = min(wstart + kernel_w, width + pad_w); gradient += top_diff_slice[ph * pooled_width + pw]; bwgt += Dtype(1.); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void StoPoolBackward(const int nthreads, const Dtype* const rand_idx, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const Dtype* const rand_idx_slice = rand_idx + (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { gradient += top_diff_slice[ph * pooled_width + pw] * (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); bwgt += (index == static_cast<int>(rand_idx_slice[ph * pooled_width + pw])); } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> __global__ void FixPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const int fix_x, const int fix_y, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // index is the pooled index (it is safe to do it here) const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int h = hstart + fix_y; int w = wstart + fix_x; if (!(h<0 || h>=height || w<0 || w>=width)) { const int depooled_index = (n * channels + c) * height * width + h * width + w; bottom_diff[depooled_index] = top_diff[index]; bweights[depooled_index] = Dtype(1.); } } } template <typename Dtype> __global__ void SoftSwitchPoolBackward(const int nthreads, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, const Dtype* const bottom_switch, Dtype* const bottom_diff, Dtype* const bweights) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width + pad_w; const int h = (index / width) % height + pad_h; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1; const int phend = min(h / stride_h + 1, pooled_height); const int pwstart = (w < kernel_w) ? 
0 : (w - kernel_w) / stride_w + 1; const int pwend = min(w / stride_w + 1, pooled_width); Dtype gradient = 0; Dtype bwgt = 0; const int pooled_geo_count = pooled_height * pooled_width; const int ch_count = n * channels + c; const int kernel_count = kernel_w * kernel_h; const Dtype* const top_diff_slice = top_diff + ch_count * pooled_geo_count; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { // figure out the pooling size int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; int local_index = (h-hstart)*kernel_w+(w-wstart); int top_sliced_index = ph * pooled_width + pw; int switch_index = (ch_count * kernel_count + local_index) * pooled_geo_count + top_sliced_index; gradient += top_diff_slice[top_sliced_index] * bottom_switch[switch_index]; bwgt += bottom_switch[switch_index]; } } bottom_diff[index] = gradient; bweights[index] = bwgt; } } template <typename Dtype> void PoolingLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!propagate_down[0]) { return; } Dtype* bweights = backward_weights_.mutable_gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); // We'll output the mask to top[1] if it's of size >1. const bool use_top_mask = top.size() > 1; const int* mask = NULL; const Dtype* top_mask = NULL; switch (this->layer_param_.pooling_param().pool()) { case PoolingParameter_PoolMethod_SWITCH: top_mask = bottom[1]->gpu_data(); case PoolingParameter_PoolMethod_MAX: if (!top_mask) { if (use_top_mask) { top_mask = top[1]->gpu_data(); } else { mask = max_idx_.gpu_data(); } } // NOLINT_NEXT_LINE(whitespace/operators) MaxPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, mask, top_mask, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights, mask_index_type_ == PoolingParameter_MaskIndexType_LOCAL); break; case PoolingParameter_PoolMethod_AVE: // NOLINT_NEXT_LINE(whitespace/operators) AvePoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_SUM: // NOLINT_NEXT_LINE(whitespace/operators) SumPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_STOCHASTIC: // NOLINT_NEXT_LINE(whitespace/operators) StoPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, rand_idx_.gpu_data(), top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, bottom_diff, bweights); break; case PoolingParameter_PoolMethod_FIX: // NOLINT_NEXT_LINE(whitespace/operators) { const int top_count = top[0]->count(); caffe_gpu_set(count, Dtype(0.), bottom_diff); caffe_gpu_set(count, Dtype(0.), bweights); FixPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( top_count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, 
pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, fix_x_, fix_y_, bottom_diff, bweights); } break; case PoolingParameter_PoolMethod_SOFT_SWITCH: // NOLINT_NEXT_LINE(whitespace/operators) { const Dtype* bottom_switch = bottom[1]->gpu_data(); SoftSwitchPoolBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, top[0]->num(), channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_switch, bottom_diff, bweights); } break; default: LOG(FATAL) << "Unknown pooling method."; } CUDA_POST_KERNEL_CHECK; } INSTANTIATE_LAYER_GPU_FUNCS(PoolingLayer); } // namespace caffe
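// Illustrative sketch: every kernel in this layer decomposes a flat NCHW offset the
// same way (pw = index % W, ph = (index / W) % H, c = (index / W / H) % C,
// n = index / W / H / C). The small host-side check below verifies that round trip for
// made-up example dimensions; the sizes and the standalone main() are assumptions for
// illustration only, not part of the layer.
#include <cassert>

int main() {
  const int N = 2, C = 3, H = 4, W = 5;  // assumed example dimensions
  for (int index = 0; index < N * C * H * W; ++index) {
    int pw = index % W;
    int ph = (index / W) % H;
    int c  = (index / W / H) % C;
    int n  = index / W / H / C;
    // Recomposing the coordinates must give back the original flat offset.
    assert(((n * C + c) * H + ph) * W + pw == index);
  }
  return 0;
}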
34a371fafdaf322752e707b51c0a47927375bc78.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This program demonstrates the basics of working with cuda. We use the GPU to add two arrays. We also introduce cuda's approach to error handling and timing using cuda Events. This is the main program. You should also look at the header add.h for the important declarations, and then look at add.cu to see how to define functions that execute on the GPU. */ #include <iostream> #include "add.h" int main() { int matSize = 1000; int sequential = 1; int blocks = 1; int threads = 1; //get array dimensions std::cout << "Please enter the dimensions of the matrix (1000<=matSize<=10000):"; std::cin >> matSize; //std::cout << "you input: " << matSize << std::endl; //get if we are using cuda or sequential addtion std::cout << "Sequential or CUDA?(1=Sequential, 0=CUDA):"; std::cin >> sequential; if(sequential < 1){ std::cout << "Please enter the number of blocks to be used:"; std::cin >> blocks; if(blocks < 1 ){//|| blocks > 256){ std::cout << "invalid block number, using default of 256 (Max 65535)." << std::endl; blocks = matSize*matSize; } std::cout << "Please enter the number of threads per block:"; std::cin >> threads; if(threads < 1 ){//|| threads > 32){ std::cout << "invalid thread number, using default of 32 (Max 65535)." << std::endl; threads = 32; } /*if(blocks*threads != matSize*matSize){ std::cout << "insufficient blocks and threads used, switching to default." << std::endl; blocks = matSize*matSize; threads = 1; }*/ } //int* a[matSize]; //int* b[matSize]; //int* c[matSize]; /* These will point to memory on the GPU - notice the correspondence between these pointers and the arrays declared above. */ //int (*dev_a)[matSize], (*dev_b)[matSize], (*dev_c)[matSize]; int **dev_a, **dev_b, **dev_c; /* These calls allocate memory on the GPU (also called the device). This is similar to C's malloc, except that instead of directly returning a pointer to the allocated memory, hipMalloc returns the pointer through its first argument, which must be a void**. The second argument is the number of bytes we want to allocate. NB: the return value of hipMalloc (like most cuda functions) is an error code. Strictly speaking, we should check this value and perform error handling if anything went wrong. We do this for the first call to hipMalloc so you can see what it looks like, but for all other function calls we just point out that you should do error checking. Actually, a good idea would be to wrap this error checking in a function or macro, which is what the Cuda By Example book does. */ //std::cout << "1" << std::endl; hipError_t err = hipMallocManaged( (void**) &dev_a, (matSize*matSize) * sizeof(int)); if (err != hipSuccess) { std::cerr << "Error: " << hipGetErrorString(err) << std::endl; exit(1); } hipMallocManaged( (void**) &dev_b, (matSize*matSize) * sizeof(int)); hipMallocManaged( (void**) &dev_c, (matSize*matSize) * sizeof(int)); // These lines just fill the host arrays with some data so we can do // something interesting. Well, so we can add two arrays. 
//std::cout << "2" << std::endl; for(int iter = 0; iter<matSize;iter++){ hipMallocManaged( (void**) &(dev_a[iter]), (matSize)*sizeof(int)); hipMallocManaged( (void**) &(dev_b[iter]), (matSize)*sizeof(int)); hipMallocManaged( (void**) &(dev_c[iter]), (matSize)*sizeof(int)); for(int cur = 0; cur<matSize;cur++){ dev_a[iter][cur] = iter*cur; dev_b[iter][cur] = iter*cur; dev_c[iter][cur] = 0; } } //std::cout << "3" << std::endl; /* The following code is responsible for handling timing for code that executes on the GPU. The cuda approach to this problem uses events. For timing purposes, an event is essentially a point in time. We create events for the beginning and end points of the process we want to time. When we want to start timing, we call hipEventRecord. In this case, we want to record the time it takes to transfer data to the GPU, perform some computations, and transfer data back. */ hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord( start, 0 ); //sequential addition if(sequential > 0){ // Arrays on the host (CPU) //std::cout << "4" << std::endl; /*for(int iter = 0; iter<matSize;iter++){ dev_a[iter] = new int [matSize]; dev_b[iter] = new int [matSize]; dev_c[iter] = new int [matSize]; for(int cur = 0; cur<matSize;cur++){ dev_a[iter][cur] = iter*cur; dev_b[iter][cur] = iter*cur; dev_c[iter][cur] = 0; } }*/ //std::cout << "5" << std::endl; for(int x = 0; x<matSize; x++){ for(int y=0; y<matSize; y++){ dev_c[x][y] = dev_a[x][y] * dev_b[y][x]; } } //std::cout << "6" << std::endl; }else{ /* Once we have host arrays containing data and we have allocated memory on the GPU, we have to transfer data from the host to the device. Again, notice the similarity to C's memcpy function. The first argument is the destination of the copy - in this case a pointer to memory allocated on the device. The second argument is the source of the copy. The third argument is the number of bytes we want to copy. The last argument is a constant that tells hipMemcpy the direction of the transfer. */ //for(int iter = 0; iter < matSize; iter++){ //hipMemcpy(dev_a, a[iter], matSize * sizeof(int), hipMemcpyHostToDevice); //hipMemcpy(dev_b, b[iter], matSize * sizeof(int), hipMemcpyHostToDevice); //hipMemcpy(dev_c, c[iter], matSize * sizeof(int), hipMemcpyHostToDevice); /* FINALLY we get to run some code on the GPU. At this point, if you haven't looked at add.cu (in this folder), you should. The comments in that file explain what the add function does, so here let's focus on how add is being called. The first thing to notice is the <<<...>>>, which you should recognize as _not_ being standard C. This syntactic extension tells nvidia's cuda compiler how to parallelize the execution of the function. We'll get into details as the course progresses, but for we'll say that <<<N, 1>>> is creating N _blocks_ of 1 _thread_ each. Each of these threads is executing add with a different data element (details of the indexing are in add.cu). In larger programs, you will typically have many more blocks, and each block will have many threads. Each thread will handle a different piece of data, and many threads can execute at the same time. This is how cuda can get such large speedups. */ //add<<<blocks, threads>>>(dev_a, dev_b, dev_c); std::cout << "parallel in" << std::endl; hipLaunchKernelGGL(( mult), dim3(blocks), dim3(threads), 0, 0, matSize,dev_a, dev_b, dev_c); hipDeviceSynchronize(); std::cout << "parallel out" << std::endl; /* Unfortunately, the GPU is to some extent a black box. 
In order to print the results of our call to add, we have to transfer the data back to the host. We do that with a call to hipMemcpy, which is just like the hipMemcpy calls above, except that the direction of the transfer (given by the last argument) is reversed. In a real program we would want to check the error code returned by this function. */ //hipMemcpy(c[iter], dev_c, matSize * sizeof(int), hipMemcpyDeviceToHost); //} } /* This is the other end of the timing process. We record an event, synchronize on it, and then figure out the difference in time between the start and the stop. We have to call hipEventSynchronize before we can safely _read_ the value of the stop event. This is because the GPU may not have actually written to the event until all other work has finished. */ hipEventRecord( end, 0 ); hipEventSynchronize( end ); float elapsedTime; hipEventElapsedTime( &elapsedTime, start, end ); /* Let's check that the results are what we expect. */ for (int i = 0; i < matSize; ++i) { for(int j = 0; j < matSize; j++){ if (dev_c[i][j] != dev_a[i][j] * dev_b[j][i]) { std::cerr << "Oh no! Something went wrong. :(" << std::endl; std::cout << "Your program took: " << elapsedTime << " ms." << std::endl; std::cout << "Values at error location were: a: " << dev_a[i][j] << " b: " << dev_b[j][i] << " c: " << dev_c[i][j] << " i: " << i << " j: " << j << std::endl; // clean up events - we should check for error codes here. hipEventDestroy( start ); hipEventDestroy( end ); // clean up device pointers - just like free in C. We don't have // to check error codes for this one. hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); exit(1); } } } /* Let's let the user know that everything is ok and then display some information about the times we recorded above. */ std::cout << "Yay! Your program's results are correct." << std::endl; std::cout << "Your program took: " << elapsedTime << " ms." << std::endl; // Cleanup in the event of success. hipEventDestroy( start ); hipEventDestroy( end ); hipFree(dev_a); hipFree(dev_b); hipFree(dev_c); }
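// Illustrative sketch: the main() above launches a kernel named mult that is declared in
// add.h, which is not shown in this dump, so the signature and body below are assumptions.
// The host-side verification loop implies an element-wise product against a transposed b
// (c[x][y] = a[x][y] * b[y][x]), and a grid-stride loop keeps the kernel correct for any
// user-supplied blocks/threads combination.
#include <hip/hip_runtime.h>

__global__ void mult(int matSize, int **a, int **b, int **c) {
  int total = matSize * matSize;
  for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < total;
       idx += blockDim.x * gridDim.x) {
    int x = idx / matSize;  // row
    int y = idx % matSize;  // column
    c[x][y] = a[x][y] * b[y][x];
  }
}
// Launched from main() as:
//   hipLaunchKernelGGL(mult, dim3(blocks), dim3(threads), 0, 0, matSize, dev_a, dev_b, dev_c);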
34a371fafdaf322752e707b51c0a47927375bc78.cu
/* This program demonstrates the basics of working with cuda. We use the GPU to add two arrays. We also introduce cuda's approach to error handling and timing using cuda Events. This is the main program. You should also look at the header add.h for the important declarations, and then look at add.cu to see how to define functions that execute on the GPU. */ #include <iostream> #include "add.h" int main() { int matSize = 1000; int sequential = 1; int blocks = 1; int threads = 1; //get array dimensions std::cout << "Please enter the dimensions of the matrix (1000<=matSize<=10000):"; std::cin >> matSize; //std::cout << "you input: " << matSize << std::endl; //get if we are using cuda or sequential addtion std::cout << "Sequential or CUDA?(1=Sequential, 0=CUDA):"; std::cin >> sequential; if(sequential < 1){ std::cout << "Please enter the number of blocks to be used:"; std::cin >> blocks; if(blocks < 1 ){//|| blocks > 256){ std::cout << "invalid block number, using default of 256 (Max 65535)." << std::endl; blocks = matSize*matSize; } std::cout << "Please enter the number of threads per block:"; std::cin >> threads; if(threads < 1 ){//|| threads > 32){ std::cout << "invalid thread number, using default of 32 (Max 65535)." << std::endl; threads = 32; } /*if(blocks*threads != matSize*matSize){ std::cout << "insufficient blocks and threads used, switching to default." << std::endl; blocks = matSize*matSize; threads = 1; }*/ } //int* a[matSize]; //int* b[matSize]; //int* c[matSize]; /* These will point to memory on the GPU - notice the correspondence between these pointers and the arrays declared above. */ //int (*dev_a)[matSize], (*dev_b)[matSize], (*dev_c)[matSize]; int **dev_a, **dev_b, **dev_c; /* These calls allocate memory on the GPU (also called the device). This is similar to C's malloc, except that instead of directly returning a pointer to the allocated memory, cudaMalloc returns the pointer through its first argument, which must be a void**. The second argument is the number of bytes we want to allocate. NB: the return value of cudaMalloc (like most cuda functions) is an error code. Strictly speaking, we should check this value and perform error handling if anything went wrong. We do this for the first call to cudaMalloc so you can see what it looks like, but for all other function calls we just point out that you should do error checking. Actually, a good idea would be to wrap this error checking in a function or macro, which is what the Cuda By Example book does. */ //std::cout << "1" << std::endl; cudaError_t err = cudaMallocManaged( (void**) &dev_a, (matSize*matSize) * sizeof(int)); if (err != cudaSuccess) { std::cerr << "Error: " << cudaGetErrorString(err) << std::endl; exit(1); } cudaMallocManaged( (void**) &dev_b, (matSize*matSize) * sizeof(int)); cudaMallocManaged( (void**) &dev_c, (matSize*matSize) * sizeof(int)); // These lines just fill the host arrays with some data so we can do // something interesting. Well, so we can add two arrays. //std::cout << "2" << std::endl; for(int iter = 0; iter<matSize;iter++){ cudaMallocManaged( (void**) &(dev_a[iter]), (matSize)*sizeof(int)); cudaMallocManaged( (void**) &(dev_b[iter]), (matSize)*sizeof(int)); cudaMallocManaged( (void**) &(dev_c[iter]), (matSize)*sizeof(int)); for(int cur = 0; cur<matSize;cur++){ dev_a[iter][cur] = iter*cur; dev_b[iter][cur] = iter*cur; dev_c[iter][cur] = 0; } } //std::cout << "3" << std::endl; /* The following code is responsible for handling timing for code that executes on the GPU. 
The cuda approach to this problem uses events. For timing purposes, an event is essentially a point in time. We create events for the beginning and end points of the process we want to time. When we want to start timing, we call cudaEventRecord. In this case, we want to record the time it takes to transfer data to the GPU, perform some computations, and transfer data back. */ cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord( start, 0 ); //sequential addition if(sequential > 0){ // Arrays on the host (CPU) //std::cout << "4" << std::endl; /*for(int iter = 0; iter<matSize;iter++){ dev_a[iter] = new int [matSize]; dev_b[iter] = new int [matSize]; dev_c[iter] = new int [matSize]; for(int cur = 0; cur<matSize;cur++){ dev_a[iter][cur] = iter*cur; dev_b[iter][cur] = iter*cur; dev_c[iter][cur] = 0; } }*/ //std::cout << "5" << std::endl; for(int x = 0; x<matSize; x++){ for(int y=0; y<matSize; y++){ dev_c[x][y] = dev_a[x][y] * dev_b[y][x]; } } //std::cout << "6" << std::endl; }else{ /* Once we have host arrays containing data and we have allocated memory on the GPU, we have to transfer data from the host to the device. Again, notice the similarity to C's memcpy function. The first argument is the destination of the copy - in this case a pointer to memory allocated on the device. The second argument is the source of the copy. The third argument is the number of bytes we want to copy. The last argument is a constant that tells cudaMemcpy the direction of the transfer. */ //for(int iter = 0; iter < matSize; iter++){ //cudaMemcpy(dev_a, a[iter], matSize * sizeof(int), cudaMemcpyHostToDevice); //cudaMemcpy(dev_b, b[iter], matSize * sizeof(int), cudaMemcpyHostToDevice); //cudaMemcpy(dev_c, c[iter], matSize * sizeof(int), cudaMemcpyHostToDevice); /* FINALLY we get to run some code on the GPU. At this point, if you haven't looked at add.cu (in this folder), you should. The comments in that file explain what the add function does, so here let's focus on how add is being called. The first thing to notice is the <<<...>>>, which you should recognize as _not_ being standard C. This syntactic extension tells nvidia's cuda compiler how to parallelize the execution of the function. We'll get into details as the course progresses, but for we'll say that <<<N, 1>>> is creating N _blocks_ of 1 _thread_ each. Each of these threads is executing add with a different data element (details of the indexing are in add.cu). In larger programs, you will typically have many more blocks, and each block will have many threads. Each thread will handle a different piece of data, and many threads can execute at the same time. This is how cuda can get such large speedups. */ //add<<<blocks, threads>>>(dev_a, dev_b, dev_c); std::cout << "parallel in" << std::endl; mult<<<blocks, threads>>>(matSize,dev_a, dev_b, dev_c); cudaDeviceSynchronize(); std::cout << "parallel out" << std::endl; /* Unfortunately, the GPU is to some extent a black box. In order to print the results of our call to add, we have to transfer the data back to the host. We do that with a call to cudaMemcpy, which is just like the cudaMemcpy calls above, except that the direction of the transfer (given by the last argument) is reversed. In a real program we would want to check the error code returned by this function. */ //cudaMemcpy(c[iter], dev_c, matSize * sizeof(int), cudaMemcpyDeviceToHost); //} } /* This is the other end of the timing process. 
We record an event, synchronize on it, and then figure out the difference in time between the start and the stop. We have to call cudaEventSynchronize before we can safely _read_ the value of the stop event. This is because the GPU may not have actually written to the event until all other work has finished. */ cudaEventRecord( end, 0 ); cudaEventSynchronize( end ); float elapsedTime; cudaEventElapsedTime( &elapsedTime, start, end ); /* Let's check that the results are what we expect. */ for (int i = 0; i < matSize; ++i) { for(int j = 0; j < matSize; j++){ if (dev_c[i][j] != dev_a[i][j] * dev_b[j][i]) { std::cerr << "Oh no! Something went wrong. :(" << std::endl; std::cout << "Your program took: " << elapsedTime << " ms." << std::endl; std::cout << "Values at error location were: a: " << dev_a[i][j] << " b: " << dev_b[j][i] << " c: " << dev_c[i][j] << " i: " << i << " j: " << j << std::endl; // clean up events - we should check for error codes here. cudaEventDestroy( start ); cudaEventDestroy( end ); // clean up device pointers - just like free in C. We don't have // to check error codes for this one. cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); exit(1); } } } /* Let's let the user know that everything is ok and then display some information about the times we recorded above. */ std::cout << "Yay! Your program's results are correct." << std::endl; std::cout << "Your program took: " << elapsedTime << " ms." << std::endl; // Cleanup in the event of success. cudaEventDestroy( start ); cudaEventDestroy( end ); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); }
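// Illustrative sketch: a minimal, self-contained version of the cudaEvent timing pattern
// used above: record a start event, run the work, record a stop event, synchronize on the
// stop event, then read the elapsed milliseconds. The busyWork kernel and the launch
// configuration are placeholders chosen for the sketch, not taken from this program.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void busyWork(int *x) {  // trivial placeholder kernel
  if (threadIdx.x == 0 && blockIdx.x == 0) *x += 1;
}

int main() {
  int *x;
  cudaMallocManaged(&x, sizeof(int));
  *x = 0;

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, 0);               // mark the beginning of the timed region
  busyWork<<<1, 32>>>(x);
  cudaEventRecord(stop, 0);                // mark the end of the timed region
  cudaEventSynchronize(stop);              // wait until the stop event has actually completed

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);  // elapsed time between the two events, in ms
  printf("kernel took %f ms, x = %d\n", ms, *x);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(x);
  return 0;
}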
f8580a25812fd7ebef41c489dfbdd2996b7f6a0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #include "ppl/cv/cuda/resize.h" #include "utility.hpp" #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define INC(x, l) ((x + 1) >= (l) ? (x) : ((x) + 1)) #define INTER_RESIZE_COEF_BITS 11 #define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) #define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { template <typename T> __DEVICE__ T bilinearSampleUchar(T t[][2], int x0, int x1, int y0, int y1); template <> __DEVICE__ uchar2 bilinearSampleUchar(uchar2 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int2 ret; uchar2 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar3 bilinearSampleUchar(uchar3 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int3 ret; uchar3 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3; final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar4 bilinearSampleUchar(uchar4 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int4 ret; uchar4 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3; final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.w = t[0][0].w * a0 + t[0][1].w * a1 + t[1][0].w * a2 + t[1][1].w * a3; final_ret.w = (ret.w + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } /***************************** ResizeLinear() ******************************/ __global__ void resizeLinearKernel(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + 
threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fy = ((element_y + 0.5f) * row_scale - 0.5f); float fx = ((element_x + 0.5f) * col_scale - 0.5f); int sy = floor(fy); int sx = floor(fx); fy -= sy; fx -= sx; if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy, src_rows); int cbufy[2]; fy = fy * INTER_RESIZE_COEF_SCALE; cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy); cbufy[1] = rint(fy); int sx_ = INC(sx, src_cols); int cbufx[2]; fx = fx * INTER_RESIZE_COEF_SCALE; cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx)); cbufx[1] = rint(fx); if (channels == 1) { int src_index0 = sy * src_stride + sx; int src_index1 = sy * src_stride + sx_; int src_index2 = sy_ * src_stride + sx; int src_index3 = sy_ * src_stride + sx_; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = cbufy[0] * cbufx[0] * src[src_index0] + cbufy[0] * cbufx[1] * src[src_index1] + cbufy[1] * cbufx[0] * src[src_index2] + cbufy[1] * cbufx[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 2) { uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride); uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride); uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride); uchar2 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } } __global__ void resizeLinearKernel(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fx = ((element_x + 0.5f) * col_scale - 0.5f); float fy = ((element_y + 0.5f) * row_scale - 0.5f); int sx = floor(fx); int sy = floor(fy); fx -= sx; fy -= sy; if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy,src_rows); float cbufy[2]; cbufy[0] = 1.f - fy; cbufy[1] = 1.f - cbufy[0]; int sx_ = INC(sx,src_cols); float cbufx[2]; cbufx[0] = 1.f - fx; cbufx[1] = 1.f - cbufx[0]; if (channels == 1) { int index = sy * src_stride; float src1 = src[index + sx]; float src2 = src[index + sx_]; float value1 = cbufy[0] * cbufx[0] * src1; float value2 = cbufy[0] * cbufx[1] * src2; float sum = 0.f; sum += value1 + 
value2; index = sy_ * src_stride; src1 = src[index + sx]; src2 = src[index + sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1 + value2; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = sy * src_stride; float3 src1 = ((float3*)(src + index))[sx]; float3 src2 = ((float3*)(src + index))[sx_]; float3 value1 = cbufy[0] * cbufx[0] * src1; float3 value2 = cbufy[0] * cbufx[1] * src2; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float3*)(src + index))[sx]; src2 = ((float3*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = sy * src_stride; float4 src1 = ((float4*)(src + index))[sx]; float4 src2 = ((float4*)(src + index))[sx_]; float4 value1 = cbufy[0] * cbufx[0] * src1; float4 value2 = cbufy[0] * cbufx[1] * src2; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float4*)(src + index))[sx]; src2 = ((float4*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } RetCode resizeLinear(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; hipLaunchKernelGGL(( resizeLinearKernel), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeLinear(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), 
hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; double col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; hipLaunchKernelGGL(( resizeLinearKernel), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeLinear<uchar, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<uchar, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<uchar, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } /************************** resizeNearestPoint() ***************************/ template <typename T0, typename T1> __global__ void resizeNearestPointKernel(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = element_y * row_scale; sy = MIN(sy, src_rows - 1); int sx = element_x * col_scale; sx = MIN(sx, src_cols - 1); T0* input = (T0*)(src + sy* src_stride); T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = 
input[sx]; } RetCode resizeNearestPoint(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; if (channels == 1) { hipLaunchKernelGGL(( resizeNearestPointKernel<uchar, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeNearestPointKernel<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { hipLaunchKernelGGL(( resizeNearestPointKernel<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeNearestPoint(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; if (channels == 1) { hipLaunchKernelGGL(( resizeNearestPointKernel<float, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeNearestPointKernel<float3, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { 
hipLaunchKernelGGL(( resizeNearestPointKernel<float4, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeNearestPoint<uchar, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<uchar, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<uchar, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } /****************************** ResizeArea() *******************************/ template <typename T> __global__ void resizeAreaKernel0C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? 
y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float sum = 0.f; T* input; for (int i = y_start; i < y_end; ++i) { input = (T*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturate_cast(sum); } else { output[element_x] = sum; } } template <typename T0, typename T1> __global__ void resizeAreaKernel0C2(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float2 sum = make_float2(0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float2>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel0C3(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float3 sum = make_float3(0.f, 0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float3>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel0C4(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? 
y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float4>(sum); } template <typename T> __global__ void resizeAreaKernel1C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T* input; float sum = 0.f; float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * (sy1 - fsy1) * (sx1 - fsx1); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx] * (sy1 - fsy1); } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * (sy1 - fsy1) * (fsx2 - sx2); } } input = (T*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * ((sx1 - fsx1)); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx]; } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * ((fsx2 - sx2)); } input += src_stride; } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * (fsy2 - sy2) * (sx1 - fsx1); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx] * (fsy2 - sy2); } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * (fsy2 - sy2) * (fsx2 - sx2); } } sum = sum / area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturate_cast(sum); } else { output[element_x] = sum; } } template <typename T0, typename T1> __global__ void resizeAreaKernel1C2(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float2 value; float2 sum = make_float2(0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * 
input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float2>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel1C3(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float3 value; float3 sum = make_float3(0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float3>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel1C4(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float4 value; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); 
for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float4>(sum); } __global__ void resizeAreaKernel2(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = floor(element_y * row_scale); int sx = floor(element_x * col_scale); float fy = element_y + 1 - (sy + 1) * inv_row_scale; float fx = element_x + 1 - (sx + 1) * inv_col_scale; fy = fy <= 0 ? 0.f : fy - floor(fy); fx = fx <= 0 ? 0.f : fx - floor(fx); if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy, src_rows); int cbufy[2]; fy = fy * INTER_RESIZE_COEF_SCALE; cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy); cbufy[1] = rint(fy); int sx_ = INC(sx, src_cols); int cbufx[2]; fx = fx * INTER_RESIZE_COEF_SCALE; cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx)); cbufx[1] = rint(fx); if (channels == 1) { int src_index0 = sy * src_stride + sx; int src_index1 = sy * src_stride + sx_; int src_index2 = sy_ * src_stride + sx; int src_index3 = sy_ * src_stride + sx_; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = cbufy[0] * cbufx[0] * src[src_index0] + cbufy[0] * cbufx[1] * src[src_index1] + cbufy[1] * cbufx[0] * src[src_index2] + cbufy[1] * cbufx[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 2) { uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride); uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride); uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride); uchar2 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } } __global__ void 
resizeAreaKernel2(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = floor(element_y * row_scale); int sx = floor(element_x * col_scale); float fy = element_y + 1 - (sy + 1) * inv_row_scale; float fx = element_x + 1 - (sx + 1) * inv_col_scale; fy = fy <= 0 ? 0.f : fy - floor(fy); fx = fx <= 0 ? 0.f : fx - floor(fx); if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy,src_rows); float cbufy[2]; cbufy[0] = 1.f - fy; cbufy[1] = 1.f - cbufy[0]; int sx_ = INC(sx,src_cols); float cbufx[2]; cbufx[0] = 1.f - fx; cbufx[1] = 1.f - cbufx[0]; if (channels == 1) { int index = sy * src_stride; float src1 = src[index + sx]; float src2 = src[index + sx_]; float value1 = cbufy[0] * cbufx[0] * src1; float value2 = cbufy[0] * cbufx[1] * src2; float sum = 0.f; sum += value1 + value2; index = sy_ * src_stride; src1 = src[index + sx]; src2 = src[index + sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1 + value2; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = sy * src_stride; float3 src1 = ((float3*)(src + index))[sx]; float3 src2 = ((float3*)(src + index))[sx_]; float3 value1 = cbufy[0] * cbufx[0] * src1; float3 value2 = cbufy[0] * cbufx[1] * src2; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float3*)(src + index))[sx]; src2 = ((float3*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = sy * src_stride; float4 src1 = ((float4*)(src + index))[sx]; float4 src2 = ((float4*)(src + index))[sx_]; float4 value1 = cbufy[0] * cbufx[0] * src1; float4 value2 = cbufy[0] * cbufx[1] * src2; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float4*)(src + index))[sx]; src2 = ((float4*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } RetCode resizeArea(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 
block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { hipLaunchKernelGGL(( resizeAreaKernel0C1<uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeAreaKernel0C3<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { hipLaunchKernelGGL(( resizeAreaKernel0C4<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { hipLaunchKernelGGL(( resizeAreaKernel1C1<uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeAreaKernel1C3<uchar3, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { hipLaunchKernelGGL(( resizeAreaKernel1C4<uchar4, uchar>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { hipLaunchKernelGGL(( resizeAreaKernel2), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeArea(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, hipStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); hipError_t code = hipSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = hipMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), hipMemcpyDeviceToDevice); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; double col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { hipLaunchKernelGGL(( resizeAreaKernel0C1<float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, 
src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeAreaKernel0C3<float3, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { hipLaunchKernelGGL(( resizeAreaKernel0C4<float4, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { hipLaunchKernelGGL(( resizeAreaKernel1C1<float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { hipLaunchKernelGGL(( resizeAreaKernel1C3<float3, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { hipLaunchKernelGGL(( resizeAreaKernel1C4<float4, float>), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { hipLaunchKernelGGL(( resizeAreaKernel2), dim3(grid), dim3(block), 0, stream, src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } code = hipGetLastError(); if (code != hipSuccess) { LOG(ERROR) << "CUDA error: " << hipGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeArea<uchar, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<uchar, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<uchar, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 1>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 3>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 4>(hipStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, 
                           outWidthStride, stream);
  return code;
}

} // namespace cuda
} // namespace cv
} // namespace ppl
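A minimal host-side usage sketch for the HIP entry points defined above. It assumes the declarations come from the same public resize header this translation unit implements (ppl/cv/cuda/resize.h in the CUDA tree); the image sizes, buffer names, and the demo function itself are illustrative placeholders rather than library requirements. Note how resizeArea() dispatches: the dedicated area kernels run only when both dimensions shrink, with the kernel0 family used for integer ratios and the kernel1 family otherwise, while any upscaling falls through to resizeAreaKernel2.

#include <hip/hip_runtime.h>
#include "ppl/cv/cuda/resize.h"

// Hypothetical driver: area-resize a 3-channel float image already resident on the GPU.
// The 512x512 -> 256x256 shrink has an integer ratio, so resizeAreaKernel0C3 is selected.
int demoResizeAreaHip() {
  const int src_h = 512, src_w = 512, dst_h = 256, dst_w = 256, channels = 3;
  const int src_stride = src_w * channels;  // strides are element counts per row, as the asserts require
  const int dst_stride = dst_w * channels;

  float* d_src = nullptr;
  float* d_dst = nullptr;
  hipMalloc((void**)&d_src, src_h * src_stride * sizeof(float));
  hipMalloc((void**)&d_dst, dst_h * dst_stride * sizeof(float));
  // A real program would upload the source pixels with hipMemcpy here.

  hipStream_t stream;
  hipStreamCreate(&stream);
  ppl::common::RetCode rc = ppl::cv::cuda::ResizeArea<float, 3>(
      stream, src_h, src_w, src_stride, d_src,
      dst_h, dst_w, dst_stride, d_dst);
  hipStreamSynchronize(stream);

  hipStreamDestroy(stream);
  hipFree(d_src);
  hipFree(d_dst);
  return rc == ppl::common::RC_SUCCESS ? 0 : 1;
}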
f8580a25812fd7ebef41c489dfbdd2996b7f6a0d.cu
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with this * work for additional information regarding copyright ownership. The ASF * licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. */ #include "ppl/cv/cuda/resize.h" #include "utility.hpp" #define MIN(a,b) ((a) < (b) ? (a) : (b)) #define MAX(a,b) ((a) > (b) ? (a) : (b)) #define INC(x, l) ((x + 1) >= (l) ? (x) : ((x) + 1)) #define INTER_RESIZE_COEF_BITS 11 #define INTER_RESIZE_COEF_SCALE (1 << INTER_RESIZE_COEF_BITS) #define CAST_BITS (INTER_RESIZE_COEF_BITS << 1) using namespace ppl::common; namespace ppl { namespace cv { namespace cuda { template <typename T> __DEVICE__ T bilinearSampleUchar(T t[][2], int x0, int x1, int y0, int y1); template <> __DEVICE__ uchar2 bilinearSampleUchar(uchar2 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int2 ret; uchar2 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar3 bilinearSampleUchar(uchar3 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int3 ret; uchar3 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3; final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } template <> __DEVICE__ uchar4 bilinearSampleUchar(uchar4 t[][2], int x0, int x1, int y0, int y1) { int a0 = y0 * x0; int a1 = y0 * x1; int a2 = y1 * x0; int a3 = y1 * x1; int4 ret; uchar4 final_ret; ret.x = t[0][0].x * a0 + t[0][1].x * a1 + t[1][0].x * a2 + t[1][1].x * a3; final_ret.x = (ret.x + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.y = t[0][0].y * a0 + t[0][1].y * a1 + t[1][0].y * a2 + t[1][1].y * a3; final_ret.y = (ret.y + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.z = t[0][0].z * a0 + t[0][1].z * a1 + t[1][0].z * a2 + t[1][1].z * a3; final_ret.z = (ret.z + (1<<(CAST_BITS-1))) >> CAST_BITS; ret.w = t[0][0].w * a0 + t[0][1].w * a1 + t[1][0].w * a2 + t[1][1].w * a3; final_ret.w = (ret.w + (1<<(CAST_BITS-1))) >> CAST_BITS; return final_ret; } /***************************** ResizeLinear() ******************************/ __global__ void resizeLinearKernel(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || 
element_x >= dst_cols) { return; } float fy = ((element_y + 0.5f) * row_scale - 0.5f); float fx = ((element_x + 0.5f) * col_scale - 0.5f); int sy = floor(fy); int sx = floor(fx); fy -= sy; fx -= sx; if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy, src_rows); int cbufy[2]; fy = fy * INTER_RESIZE_COEF_SCALE; cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy); cbufy[1] = rint(fy); int sx_ = INC(sx, src_cols); int cbufx[2]; fx = fx * INTER_RESIZE_COEF_SCALE; cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx)); cbufx[1] = rint(fx); if (channels == 1) { int src_index0 = sy * src_stride + sx; int src_index1 = sy * src_stride + sx_; int src_index2 = sy_ * src_stride + sx; int src_index3 = sy_ * src_stride + sx_; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = cbufy[0] * cbufx[0] * src[src_index0] + cbufy[0] * cbufx[1] * src[src_index1] + cbufy[1] * cbufx[0] * src[src_index2] + cbufy[1] * cbufx[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 2) { uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride); uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride); uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride); uchar2 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } } __global__ void resizeLinearKernel(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fx = ((element_x + 0.5f) * col_scale - 0.5f); float fy = ((element_y + 0.5f) * row_scale - 0.5f); int sx = floor(fx); int sy = floor(fy); fx -= sx; fy -= sy; if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy,src_rows); float cbufy[2]; cbufy[0] = 1.f - fy; cbufy[1] = 1.f - cbufy[0]; int sx_ = INC(sx,src_cols); float cbufx[2]; cbufx[0] = 1.f - fx; cbufx[1] = 1.f - cbufx[0]; if (channels == 1) { int index = sy * src_stride; float src1 = src[index + sx]; float src2 = src[index + sx_]; float value1 = cbufy[0] * cbufx[0] * src1; float value2 = cbufy[0] * cbufx[1] * src2; float sum = 0.f; sum += value1 + value2; index = sy_ * src_stride; src1 = src[index + sx]; src2 = src[index + sx_]; value1 = cbufy[1] * 
cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1 + value2; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = sy * src_stride; float3 src1 = ((float3*)(src + index))[sx]; float3 src2 = ((float3*)(src + index))[sx_]; float3 value1 = cbufy[0] * cbufx[0] * src1; float3 value2 = cbufy[0] * cbufx[1] * src2; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float3*)(src + index))[sx]; src2 = ((float3*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = sy * src_stride; float4 src1 = ((float4*)(src + index))[sx]; float4 src2 = ((float4*)(src + index))[sx_]; float4 value1 = cbufy[0] * cbufx[0] * src1; float4 value2 = cbufy[0] * cbufx[1] * src2; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float4*)(src + index))[sx]; src2 = ((float4*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } RetCode resizeLinear(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeLinear(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return 
RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; double col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; resizeLinearKernel<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeLinear<uchar, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<uchar, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeLinear<float, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeLinear(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } /************************** resizeNearestPoint() ***************************/ template <typename T0, typename T1> __global__ void resizeNearestPointKernel(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = element_y * row_scale; sy = MIN(sy, src_rows - 1); int sx = element_x * col_scale; sx = MIN(sx, src_cols - 1); T0* input = (T0*)(src + sy* src_stride); T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = input[sx]; } RetCode resizeNearestPoint(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, 
int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; if (channels == 1) { resizeNearestPointKernel<uchar, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeNearestPointKernel<uchar3, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeNearestPointKernel<uchar4, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeNearestPoint(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 4; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; if (channels == 1) { resizeNearestPointKernel<float, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeNearestPointKernel<float3, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeNearestPointKernel<float4, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); 
return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeNearestPoint<uchar, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<uchar, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeNearestPoint<float, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeNearestPoint(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } /****************************** ResizeArea() *******************************/ template <typename T> __global__ void resizeAreaKernel0C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? 
y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float sum = 0.f; T* input; for (int i = y_start; i < y_end; ++i) { input = (T*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturate_cast(sum); } else { output[element_x] = sum; } } template <typename T0, typename T1> __global__ void resizeAreaKernel0C2(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float2 sum = make_float2(0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float2>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel0C3(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float3 sum = make_float3(0.f, 0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float3>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel0C4(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, int col_scale, int row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int x_start = element_x * col_scale; int y_start = element_y * row_scale; int x_end = x_start + col_scale; int y_end = y_start + row_scale; x_end = (x_end <= src_cols) ? x_end : src_cols; y_end = (y_end <= src_rows) ? 
y_end : src_rows; int area = (x_end - x_start) * (y_end - y_start); float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); T0* input; for (int i = y_start; i < y_end; ++i) { input = (T0*)(src + i * src_stride); for (int j = x_start; j < x_end; ++j) { sum += input[j]; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float4>(sum); } template <typename T> __global__ void resizeAreaKernel1C1(const T* src, int src_rows, int src_cols, int channels, int src_stride, T* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T* input; float sum = 0.f; float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * (sy1 - fsy1) * (sx1 - fsx1); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx] * (sy1 - fsy1); } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * (sy1 - fsy1) * (fsx2 - sx2); } } input = (T*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * ((sx1 - fsx1)); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx]; } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * ((fsx2 - sx2)); } input += src_stride; } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { sum = sum + input[sx1 - 1] * (fsy2 - sy2) * (sx1 - fsx1); } for (int dx = sx1; dx < sx2; ++dx) { sum = sum + input[dx] * (fsy2 - sy2); } if (fsx2 - sx2 > 1e-3) { sum = sum + input[sx2] * (fsy2 - sy2) * (fsx2 - sx2); } } sum = sum / area; T* output = (T*)(dst + element_y * dst_stride); if (sizeof(T) == 1) { output[element_x] = saturate_cast(sum); } else { output[element_x] = sum; } } template <typename T0, typename T1> __global__ void resizeAreaKernel1C2(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float2 value; float2 sum = make_float2(0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * 
input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float2>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel1C3(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float3 value; float3 sum = make_float3(0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float3>(sum); } template <typename T0, typename T1> __global__ void resizeAreaKernel1C4(const T1* src, int src_rows, int src_cols, int channels, int src_stride, T1* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } float fsy1 = element_y * row_scale; float fsy2 = fsy1 + row_scale; int sy1 = ceilf(fsy1); int sy2 = floorf(fsy2); float fsx1 = element_x * col_scale; float fsx2 = fsx1 + col_scale; int sx1 = ceilf(fsx1); int sx2 = floorf(fsx2); T0* input; float4 value; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); float area = fminf(col_scale, src_cols - fsx1) * fminf(row_scale, src_rows - fsy1); if (sy1 - fsy1 > 1e-3) { input = (T0*)(src + (sy1 - 1) * src_stride); if (sx1 - fsx1 > 1e-3) { value = (sy1 - fsy1) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (sy1 - fsy1) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (sy1 - fsy1) * (fsx2 - sx2) * input[sx2]; sum += value; } } input = (T0*)(src + sy1 * src_stride); 
for (int dy = sy1; dy < sy2; ++dy) { if (sx1 - fsx1 > 1e-3) { value = (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { sum += input[dx]; } if (fsx2 - sx2 > 1e-3) { value = (fsx2 - sx2) * input[sx2]; sum += value; } input = (T0*)((T1*)input + src_stride); } if (fsy2 - sy2 > 1e-3) { if (sx1 - fsx1 > 1e-3) { value = (fsy2 - sy2) * (sx1 - fsx1) * input[sx1 - 1]; sum += value; } for (int dx = sx1; dx < sx2; ++dx) { value = (fsy2 - sy2) * input[dx]; sum += value; } if (fsx2 - sx2 > 1e-3) { value = (fsy2 - sy2) * (fsx2 - sx2) * input[sx2]; sum += value; } } sum /= area; T0* output = (T0*)(dst + element_y * dst_stride); output[element_x] = saturate_cast_vector<T0, float4>(sum); } __global__ void resizeAreaKernel2(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, float col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = floor(element_y * row_scale); int sx = floor(element_x * col_scale); float fy = element_y + 1 - (sy + 1) * inv_row_scale; float fx = element_x + 1 - (sx + 1) * inv_col_scale; fy = fy <= 0 ? 0.f : fy - floor(fy); fx = fx <= 0 ? 0.f : fx - floor(fx); if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy, src_rows); int cbufy[2]; fy = fy * INTER_RESIZE_COEF_SCALE; cbufy[0] = rint(INTER_RESIZE_COEF_SCALE - fy); cbufy[1] = rint(fy); int sx_ = INC(sx, src_cols); int cbufx[2]; fx = fx * INTER_RESIZE_COEF_SCALE; cbufx[0] = rint(INTER_RESIZE_COEF_SCALE - rint(fx)); cbufx[1] = rint(fx); if (channels == 1) { int src_index0 = sy * src_stride + sx; int src_index1 = sy * src_stride + sx_; int src_index2 = sy_ * src_stride + sx; int src_index3 = sy_ * src_stride + sx_; int dst_index = element_y * dst_stride + element_x; int sum = 0; sum = cbufy[0] * cbufx[0] * src[src_index0] + cbufy[0] * cbufx[1] * src[src_index1] + cbufy[1] * cbufx[0] * src[src_index2] + cbufy[1] * cbufx[1] * src[src_index3]; dst[dst_index] = (sum + (1 << (CAST_BITS - 1))) >> CAST_BITS; } else if (channels == 2) { uchar2* input0 = (uchar2*)((uchar*)src + sy * src_stride); uchar2* input1 = (uchar2*)((uchar*)src + sy_ * src_stride); uchar2* output = (uchar2*)((uchar*)dst + element_y * dst_stride); uchar2 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else if (channels == 3) { uchar3* input0 = (uchar3*)((uchar*)src + sy * src_stride); uchar3* input1 = (uchar3*)((uchar*)src + sy_ * src_stride); uchar3* output = (uchar3*)((uchar*)dst + element_y * dst_stride); uchar3 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } else { uchar4* input0 = (uchar4*)((uchar*)src + sy * src_stride); uchar4* input1 = (uchar4*)((uchar*)src + sy_ * src_stride); uchar4* output = (uchar4*)((uchar*)dst + element_y * dst_stride); uchar4 t[2][2]; t[0][0] = input0[sx]; t[0][1] = input0[sx_]; t[1][0] = input1[sx]; t[1][1] = input1[sx_]; output[element_x] = bilinearSampleUchar(t, cbufx[0], cbufx[1], cbufy[0], cbufy[1]); } } __global__ void 
resizeAreaKernel2(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, double col_scale, float row_scale, float inv_col_scale, float inv_row_scale) { int element_x = blockIdx.x * blockDim.x + threadIdx.x; int element_y = blockIdx.y * blockDim.y + threadIdx.y; if (element_y >= dst_rows || element_x >= dst_cols) { return; } int sy = floor(element_y * row_scale); int sx = floor(element_x * col_scale); float fy = element_y + 1 - (sy + 1) * inv_row_scale; float fx = element_x + 1 - (sx + 1) * inv_col_scale; fy = fy <= 0 ? 0.f : fy - floor(fy); fx = fx <= 0 ? 0.f : fx - floor(fx); if (sy < 0) { sy = 0; fy = 0; } if (sx < 0) { sx = 0; fx = 0; } if (sy >= src_rows) { sy = src_rows - 1; fy = 0; } if (sx >= src_cols) { sx = src_cols - 1; fx = 0; } int sy_ = INC(sy,src_rows); float cbufy[2]; cbufy[0] = 1.f - fy; cbufy[1] = 1.f - cbufy[0]; int sx_ = INC(sx,src_cols); float cbufx[2]; cbufx[0] = 1.f - fx; cbufx[1] = 1.f - cbufx[0]; if (channels == 1) { int index = sy * src_stride; float src1 = src[index + sx]; float src2 = src[index + sx_]; float value1 = cbufy[0] * cbufx[0] * src1; float value2 = cbufy[0] * cbufx[1] * src2; float sum = 0.f; sum += value1 + value2; index = sy_ * src_stride; src1 = src[index + sx]; src2 = src[index + sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1 + value2; index = element_y * dst_stride + element_x; dst[index] = sum; } else if (channels == 3) { int index = sy * src_stride; float3 src1 = ((float3*)(src + index))[sx]; float3 src2 = ((float3*)(src + index))[sx_]; float3 value1 = cbufy[0] * cbufx[0] * src1; float3 value2 = cbufy[0] * cbufx[1] * src2; float3 sum = make_float3(0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float3*)(src + index))[sx]; src2 = ((float3*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float3* output = (float3*)(dst + element_y * dst_stride); output[element_x] = sum; } else { int index = sy * src_stride; float4 src1 = ((float4*)(src + index))[sx]; float4 src2 = ((float4*)(src + index))[sx_]; float4 value1 = cbufy[0] * cbufx[0] * src1; float4 value2 = cbufy[0] * cbufx[1] * src2; float4 sum = make_float4(0.f, 0.f, 0.f, 0.f); sum += value1; sum += value2; index = sy_ * src_stride; src1 = ((float4*)(src + index))[sx]; src2 = ((float4*)(src + index))[sx_]; value1 = cbufy[1] * cbufx[0] * src1; value2 = cbufy[1] * cbufx[1] * src2; sum += value1; sum += value2; float4* output = (float4*)(dst + element_y * dst_stride); output[element_x] = sum; } } RetCode resizeArea(const uchar* src, int src_rows, int src_cols, int channels, int src_stride, uchar* dst, int dst_rows, int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(uchar), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 
block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; float col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { resizeAreaKernel0C1<uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel0C3<uchar3, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel0C4<uchar4, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { resizeAreaKernel1C1<uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel1C3<uchar3, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel1C4<uchar4, uchar><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } RetCode resizeArea(const float* src, int src_rows, int src_cols, int channels, int src_stride, float* dst, int dst_rows, int dst_cols, int dst_stride, cudaStream_t stream) { PPL_ASSERT(src != nullptr); PPL_ASSERT(dst != nullptr); PPL_ASSERT(src_rows > 0 && src_cols > 0); PPL_ASSERT(dst_rows > 0 && dst_cols > 0); PPL_ASSERT(channels == 1 || channels == 3 || channels == 4); PPL_ASSERT(src_stride >= src_cols * channels); PPL_ASSERT(dst_stride >= dst_cols * channels); cudaError_t code = cudaSuccess; if (src_rows == dst_rows && src_cols == dst_cols && src_stride == dst_stride) { if (src != dst) { code = cudaMemcpyAsync(dst, src, src_rows * src_stride * sizeof(float), cudaMemcpyDeviceToDevice); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_MEMORY_ERROR; } } return RC_SUCCESS; } const int kBlockX = 32; const int kBlockY = 16; dim3 block(kBlockX, kBlockY); dim3 grid; grid.x = (dst_cols + kBlockX -1) / kBlockX; grid.y = (dst_rows + kBlockY - 1) / kBlockY; double col_scale = (double)src_cols / dst_cols; float row_scale = (double)src_rows / dst_rows; float inv_col_scale = 1.0 / col_scale; float inv_row_scale = 1.0 / row_scale; if (src_cols > dst_cols && src_rows > dst_rows) { if (src_cols % dst_cols == 0 && src_rows % dst_rows == 0) { if (channels == 1) { resizeAreaKernel0C1<float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel0C3<float3, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, 
dst_stride, col_scale, row_scale); } else { resizeAreaKernel0C4<float4, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } else { if (channels == 1) { resizeAreaKernel1C1<float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else if (channels == 3) { resizeAreaKernel1C3<float3, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } else { resizeAreaKernel1C4<float4, float><<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale); } } } else { resizeAreaKernel2<<<grid, block, 0, stream>>>(src, src_rows, src_cols, channels, src_stride, dst, dst_rows, dst_cols, dst_stride, col_scale, row_scale, inv_col_scale, inv_row_scale); } code = cudaGetLastError(); if (code != cudaSuccess) { LOG(ERROR) << "CUDA error: " << cudaGetErrorString(code); return RC_DEVICE_RUNTIME_ERROR; } return RC_SUCCESS; } template <> RetCode ResizeArea<uchar, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<uchar, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<uchar, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const uchar* inData, int outHeight, int outWidth, int outWidthStride, uchar* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 1>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 1, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 3>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 3, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } template <> RetCode ResizeArea<float, 4>(cudaStream_t stream, int inHeight, int inWidth, int inWidthStride, const float* inData, int outHeight, int outWidth, int outWidthStride, float* outData) { RetCode code = resizeArea(inData, inHeight, inWidth, 4, inWidthStride, outData, outHeight, outWidth, outWidthStride, stream); return code; } } // namespace cuda } // namespace cv } // namespace ppl
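A minimal host-side usage sketch for the ResizeArea specializations defined above. The header path, image sizes, and the packed strides (width * channels) are assumptions for illustration and are not part of the file:

// Hypothetical example: shrink a 640x480 RGB float image to 320x240 with area interpolation.
#include <cuda_runtime.h>
#include "ppl/cv/cuda/resize.h"   // assumed header location for ResizeArea

int main() {
  const int srcH = 480, srcW = 640, dstH = 240, dstW = 320, channels = 3;
  float *d_src = nullptr, *d_dst = nullptr;
  cudaMalloc(&d_src, srcH * srcW * channels * sizeof(float));
  cudaMalloc(&d_dst, dstH * dstW * channels * sizeof(float));
  // ... fill d_src with image data ...
  cudaStream_t stream;
  cudaStreamCreate(&stream);
  // Packed layout: the stride counts float elements per row (width * channels).
  ppl::cv::cuda::ResizeArea<float, 3>(stream, srcH, srcW, srcW * channels, d_src,
                                      dstH, dstW, dstW * channels, d_dst);
  cudaStreamSynchronize(stream);
  cudaStreamDestroy(stream);
  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}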
8a00c6eecce155725a6697c1df1c999d70b8ecb7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "common.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>

// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)

__global__ void rreluUpdateOutputTrain(int n, hiprandStateMtgp32_t *state,
  float *input, float* noise, float *output, double a, double b)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (input[i] <= 0)
    {
      float r = hiprand_uniform(&state[blockIdx.x]);
      r = r * (b-a) + a;
      output[i] = input[i] * r;
      noise[i] = r;
    }
    else
    {
      output[i] = input[i];
      noise[i] = 1;
    }
  }
}

struct RReLUUpdateOutputEval_functor
{
  const float negSlope_;

  RReLUUpdateOutputEval_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *out, float *in)
  {
    const float x = *in;
    const float r = x <= 0 ? negSlope_ : 1;
    *out = x * r;
  }
};

struct RReLUUpdateOutputEvalIP_functor
{
  const float negSlope_;

  RReLUUpdateOutputEvalIP_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *x)
  {
    if (*x <= 0)
    {
      *x = *x * negSlope_;
    }
  }
};

void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
  THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
  THAssert(THCudaTensor_checkGPU(state, 3, input, output, noise));
  if (state->rngState->current_gen == NULL)
  {
    THError("Random number generators have not been initialized.");
  }

  if (train)
  {
    input = THCudaTensor_newContiguous(state, input);
    THCudaTensor_resizeAs(state, noise, input);
    float *input_data = THCudaTensor_data(state, input);
    float *noise_data = THCudaTensor_data(state, noise);
    long n = THCudaTensor_nElement(state, input);
    if (inplace)
    {
      hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
        n, state->rngState->current_gen->gen_states,
        input_data, noise_data, input_data, lower, upper);
      THCudaTensor_set(state, output, input);
    }
    else
    {
      THCudaTensor_resizeAs(state, output, input);
      float *output_data = THCudaTensor_data(state, output);
      hipLaunchKernelGGL(( rreluUpdateOutputTrain), dim3(NUM_BLOCKS(n)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state),
        n, state->rngState->current_gen->gen_states,
        input_data, noise_data, output_data, lower, upper);
    }
    THCudaTensor_free(state, input);
  }
  else
  {
    const double negSlope = (lower + upper) / 2;
    if (inplace)
    {
      THCudaTensor_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
      THCudaTensor_set(state, output, input);
    }
    else
    {
      THCudaTensor_resizeAs(state, output, input);
      THCudaTensor_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
    }
  }
}

struct RReLUupdateGradInputEval_functor
{
  const float negSlope_;

  RReLUupdateGradInputEval_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
  {
    *gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
  }
};

struct RReLUupdateGradInputEvalIP_functor
{
  const float negSlope_;

  RReLUupdateGradInputEvalIP_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *gradOut, float *in)
  {
    if (*in <= 0)
    {
      *gradOut = (*gradOut) * negSlope_;
    }
  }
};

void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
  THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
  THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, gradInput, noise));
  gradOutput = THCudaTensor_newContiguous(state, gradOutput);

  if (train && upper - lower > 1E-6)    // e.g. if upper == lower, RReLU behaves like LeakyReLU
  {
    // multiply the gradient by the noise tensor
    if (inplace)
    {
      THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
      THCudaTensor_set(state, gradInput, gradOutput);
    }
    else
    {
      THCudaTensor_resizeAs(state, gradInput, input);
      THCudaTensor_cmul(state, gradInput, gradOutput, noise);
    }
  }
  else
  {
    // use constant factor for negative input values
    const double negSlope = (lower + upper) / 2;
    if (inplace)
    {
      THCudaTensor_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
      THCudaTensor_set(state, gradInput, gradOutput);
    }
    else
    {
      THCudaTensor_resizeAs(state, gradInput, input);
      THCudaTensor_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
    }
  }

  THCudaTensor_free(state, gradOutput);
}
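For reference, the eval-mode path above reduces to a fixed LeakyReLU with slope (lower + upper) / 2. A small host-only sketch of the same arithmetic, useful as a sanity check (illustration only, not part of the THCUNN API; the 1/8..1/3 range is the common default and is an assumption here):

// CPU reference for the eval-mode functors: forward and backward.
#include <cstdio>

static float rrelu_eval_forward(float x, float lower, float upper) {
  const float negSlope = (lower + upper) / 2.0f;   // same constant the functors use
  return x <= 0 ? x * negSlope : x;
}

static float rrelu_eval_backward(float x, float gradOut, float lower, float upper) {
  const float negSlope = (lower + upper) / 2.0f;
  return x <= 0 ? gradOut * negSlope : gradOut;
}

int main() {
  // lower = 1/8, upper = 1/3 gives negSlope ~= 0.2292
  printf("%f %f\n", rrelu_eval_forward(-2.0f, 1.0f / 8, 1.0f / 3),
                    rrelu_eval_backward(-2.0f, 1.0f, 1.0f / 8, 1.0f / 3));
  return 0;
}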
8a00c6eecce155725a6697c1df1c999d70b8ecb7.cu
#include "THCUNN.h"
#include "common.h"
#include <curand.h>
#include <curand_kernel.h>

// copied from cutorch/lib/THC/THCTensorRandom.cu
#define MAX_NUM_BLOCKS 64
#define BLOCK_SIZE 256
#define NUM_BLOCKS(n) min((int)THCCeilDiv(n, (long) BLOCK_SIZE), MAX_NUM_BLOCKS)

__global__ void rreluUpdateOutputTrain(int n, curandStateMtgp32 *state,
  float *input, float* noise, float *output, double a, double b)
{
  CUDA_KERNEL_LOOP(i, n)
  {
    if (input[i] <= 0)
    {
      float r = curand_uniform(&state[blockIdx.x]);
      r = r * (b-a) + a;
      output[i] = input[i] * r;
      noise[i] = r;
    }
    else
    {
      output[i] = input[i];
      noise[i] = 1;
    }
  }
}

struct RReLUUpdateOutputEval_functor
{
  const float negSlope_;

  RReLUUpdateOutputEval_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *out, float *in)
  {
    const float x = *in;
    const float r = x <= 0 ? negSlope_ : 1;
    *out = x * r;
  }
};

struct RReLUUpdateOutputEvalIP_functor
{
  const float negSlope_;

  RReLUUpdateOutputEvalIP_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *x)
  {
    if (*x <= 0)
    {
      *x = *x * negSlope_;
    }
  }
};

void THNN_CudaRReLU_updateOutput(THCState *state, THCudaTensor *input, THCudaTensor *output,
  THCudaTensor *noise, double lower, double upper, bool train, bool inplace, void *generator)
{
  THAssert(THCudaTensor_checkGPU(state, 3, input, output, noise));
  if (state->rngState->current_gen == NULL)
  {
    THError("Random number generators have not been initialized.");
  }

  if (train)
  {
    input = THCudaTensor_newContiguous(state, input);
    THCudaTensor_resizeAs(state, noise, input);
    float *input_data = THCudaTensor_data(state, input);
    float *noise_data = THCudaTensor_data(state, noise);
    long n = THCudaTensor_nElement(state, input);
    if (inplace)
    {
      rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
        n, state->rngState->current_gen->gen_states,
        input_data, noise_data, input_data, lower, upper);
      THCudaTensor_set(state, output, input);
    }
    else
    {
      THCudaTensor_resizeAs(state, output, input);
      float *output_data = THCudaTensor_data(state, output);
      rreluUpdateOutputTrain<<<NUM_BLOCKS(n), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>(
        n, state->rngState->current_gen->gen_states,
        input_data, noise_data, output_data, lower, upper);
    }
    THCudaTensor_free(state, input);
  }
  else
  {
    const double negSlope = (lower + upper) / 2;
    if (inplace)
    {
      THCudaTensor_pointwiseApply1(state, input, RReLUUpdateOutputEvalIP_functor(negSlope));
      THCudaTensor_set(state, output, input);
    }
    else
    {
      THCudaTensor_resizeAs(state, output, input);
      THCudaTensor_pointwiseApply2(state, output, input, RReLUUpdateOutputEval_functor(negSlope));
    }
  }
}

struct RReLUupdateGradInputEval_functor
{
  const float negSlope_;

  RReLUupdateGradInputEval_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *gradIn, float *gradOut, float *in)
  {
    *gradIn = (*in) <= 0 ? (*gradOut) * negSlope_ : (*gradOut);
  }
};

struct RReLUupdateGradInputEvalIP_functor
{
  const float negSlope_;

  RReLUupdateGradInputEvalIP_functor(float negSlope) : negSlope_(negSlope) {}

  __device__ __forceinline__ void operator()(float *gradOut, float *in)
  {
    if (*in <= 0)
    {
      *gradOut = (*gradOut) * negSlope_;
    }
  }
};

void THNN_CudaRReLU_updateGradInput(THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
  THCudaTensor *gradInput, THCudaTensor *noise, double lower, double upper, bool train, bool inplace)
{
  THAssert(THCudaTensor_checkGPU(state, 4, input, gradOutput, gradInput, noise));
  gradOutput = THCudaTensor_newContiguous(state, gradOutput);

  if (train && upper - lower > 1E-6)    // e.g. if upper == lower, RReLU behaves like LeakyReLU
  {
    // multiply the gradient by the noise tensor
    if (inplace)
    {
      THCudaTensor_cmul(state, gradOutput, gradOutput, noise);
      THCudaTensor_set(state, gradInput, gradOutput);
    }
    else
    {
      THCudaTensor_resizeAs(state, gradInput, input);
      THCudaTensor_cmul(state, gradInput, gradOutput, noise);
    }
  }
  else
  {
    // use constant factor for negative input values
    const double negSlope = (lower + upper) / 2;
    if (inplace)
    {
      THCudaTensor_pointwiseApply2(state, gradOutput, input, RReLUupdateGradInputEvalIP_functor(negSlope));
      THCudaTensor_set(state, gradInput, gradOutput);
    }
    else
    {
      THCudaTensor_resizeAs(state, gradInput, input);
      THCudaTensor_pointwiseApply3(state, gradInput, gradOutput, input, RReLUupdateGradInputEval_functor(negSlope));
    }
  }

  THCudaTensor_free(state, gradOutput);
}
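The .hip and .cu versions above differ only in the RNG headers/types (hiprand vs curand) and in the kernel launch syntax. A minimal standalone HIP sketch of that launch mapping, with the equivalent CUDA form shown in a comment (toy kernel, not part of THCUNN):

#include <hip/hip_runtime.h>

__global__ void scale(int n, float a, float* x) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024;
  float* d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));
  // CUDA form:  scale<<<dim3(4), dim3(256), 0, 0>>>(n, 2.0f, d_x);
  // hipify emits the equivalent macro call:
  hipLaunchKernelGGL(scale, dim3(4), dim3(256), 0, 0, n, 2.0f, d_x);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}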
7b59293c8b2d54608249f7be3e6b1fd143e95027.hip
// !!! This is a file automatically generated by hipify!!! //this version is 233 ms #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<iostream> #include<cudnn.h> #include <stdio.h> #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include<rocblas.h> #include<string> #include<fstream> #include<cmath> #include<ctime> using namespace std; void readWeights(float* weights, int m/*output*/, int n/*input*/, int h, int w, string baseFileName, bool readWeights = true) { string fileName = "weights2/" + baseFileName; if (readWeights) { fileName += "Weights.data"; } else { fileName += "Biases.data"; } ifstream in(fileName, ios::in | ios::binary); //cout << fileName << "\n"; if (!in.is_open()) { cout << "file " << baseFileName << " didn't open \n"; return; } in.read((char*)weights, m*n*h*w * sizeof(float)); in.close(); //cout << baseFileName << " : " << weights[0] << " " << weights[1] << "\n"; } #define cudnnCheck(exp){\ cudnnStatus_t status=(exp);\ if(status!=CUDNN_STATUS_SUCCESS){\ std::cout<<"Error at line "<<__LINE__<<" "<<cudnnGetErrorString(status)<<"\n";\ std::exit(EXIT_FAILURE);\ }\ }\ #define cudaCheck(exp) {\ hipError_t status=(exp);\ if(status!=hipSuccess){\ cerr<<"error at cuda "<<__LINE__<<" "<<hipGetErrorString(status)<<"\n";\ exit(EXIT_FAILURE);\ }\ }\ cv::Mat load_image(const char* image_path) { cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR); if (image.empty()) { cerr << "couldn't open image\n"; } cv::cvtColor(image, image, cv::COLOR_BGR2RGB); image.convertTo(image, CV_32FC3); cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); cv::Mat resizedImage(416, 416, CV_32FC2); cv::resize(image, resizedImage, cv::Size(416, 416), 0, 0, cv::INTER_CUBIC); if (resizedImage.empty())cerr << "resized image empty\n"; //cout << "ok\n"; return resizedImage; } void save_image(const char* output_filename, cv::Mat output_image) { //cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR); //cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX); //output_image.convertTo(output_image, CV_8UC3); cv::imwrite(output_filename, output_image); } //incomplete __global__ void leaky_relu_v2(float* d_data, float alpha, int size) { int index = (blockIdx.y*gridDim.x + blockIdx.x); if (index < size) { float x = d_data[index]; if (x<0) d_data[index] = alpha*x; } } //try constant shift __global__ void leaky_relu_v3(float* d_data, float alpha, int size, int step) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < step) { int channels = (size / step); index *= channels; for (int i = index; i < index + channels; i++) { float x = d_data[i]; if (x<0) d_data[i] = alpha*x; } } } __global__ void leaky_relu_v4(float* d_data, float alpha, int size, int shift) { int index = blockIdx.y*gridDim.x + blockIdx.x; index *= shift; if (index < size - shift) { for (int i = index; i < index + shift; i++) { float x = d_data[i]; if (x<0) d_data[i] = alpha*x; } } } __global__ void leaky_relu(float* d_data, float alpha, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float x = d_data[index]; if (x<0) d_data[index] = alpha*x; } } //step is width*height of the output of convolution /* @param size is width x height x channels @Param step is width x height the data in the format HxWxC k is computed as index%(size/step) */ __global__ void add_biase(float* d_data, float* biases, int size/*WxHxC*/, int step/*WxH*/) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < step) { int biaseSize 
= (size / step); index *= biaseSize; for (int i = 0; i < biaseSize; i++) { d_data[index + i] += biases[i]; } } } __device__ float iou(float bx1x1, float bx1y1, float bx1x2, float bx1y2, float bx2x1, float bx2y1, float bx2x2, float bx2y2) { float x1 = (bx1x1 > bx2x1) ? bx1x1 : bx2x1; float y1 = (bx1y1> bx2y1) ? bx1y1 : bx2y1; float x2 = (bx1x2 > bx2x2) ? bx2x2 : bx1x2; float y2 = (bx1y2 > bx2y2) ? bx2y2 : bx1y2; float A1 = (bx1x2 - bx1x1)*(bx1y2 - bx1y1); float A2 = (bx2x2 - bx2x1)*(bx2y2 - bx2y1); float A_inter = ((x2 - x1) > 0 ? (x2 - x1) : 0)*((y2 - y1) > 0 ? (y2 - y1) : 0); return(A_inter / (A1 + A2 - A_inter)); } //consider calculating the necessary points only __global__ void calculate_points(float* boxes_dims, float* points, bool* boxes, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { //int left = h_boxes_dims[index] - (h_boxes_dims[index + 2] / 2.0); //int right = h_boxes_dims[index] + (h_boxes_dims[index + 2] / 2.0); //int top = h_boxes_dims[index + 1] - (h_boxes_dims[index + 3] / 2.0); //int bottom = h_boxes_dims[index + 1] + (h_boxes_dims[index + 3] / 2.0); int step = index * 4; float center_x = boxes_dims[step]; float w = boxes_dims[step + 2]; float center_y = boxes_dims[step + 1]; float h = boxes_dims[step + 3]; points[step] = center_x - ((w) / 2.0); points[step + 2] = center_x + ((w) / 2.0); points[step + 1] = center_y - ((h) / 2.0); points[step + 3] = center_y + ((h) / 2.0); } } __global__ void non_max_supression(float* points, bool* boxes, float* maxClassScore, int* maxClassIndex, float threashold = 0.3, int size = 13 * 13 * 5) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float maxClass = maxClassScore[index]; if (maxClass < 0.3) { boxes[index] = false; return; } int maxClassInd = maxClassIndex[index]; float x1 = points[index * 4]; float y1 = points[index * 4 + 1]; float x2 = points[index * 4 + 2]; float y2 = points[index * 4 + 3]; for (int i = 0; i < size; i++) { if (boxes[i] && i != index) { if (maxClassInd == maxClassIndex[i]) { if (maxClass > maxClassScore[i]) { float x = iou(x1, y1, x2, y2, points[i * 4] , points[i * 4 + 1], points[i * 4 + 2], points[i * 4 + 3]); if (x >= threashold) { boxes[i] = false; } } } } } } } //20 classes __global__ void exp(float* classes, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x; if (index<size) { classes[index] = exp(classes[index]); } } __global__ void softmax(float* classes, int offset, float sum) { if (threadIdx.x < 20) { classes[threadIdx.x + offset] /= sum; } } __global__ void filter(float* classes, bool* boxes, float threshold = 0.4, int size = 13 * 13 * 5 * 20) { int index = (blockIdx.y*gridDim.x) + blockIdx.x; if (index < size) { if (classes[index] >= threshold) { boxes[index / 20] = true; //printf("index %d value %f\n", index, classes[index]); } } } //blocks*threads __global__ void sigmoid(float* x, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x; if (index<size) { x[index] = 1 / (1 + exp(-1 * x[index])); } } //calculate centers of the box and the width and height //calculate the necessary ones __global__ void calculate_box_dims(float* x, float* d_anchors, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { //center_x = (float(col) + sigmoid(tx)) * 32.0 x[index] = (((index / (4)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; //center_y = (float(row) + sigmoid(ty)) * 32.0 x[index + 1] = ((index / (13 * 4)) + (1.0 / (1 + expf(-1 * x[index + 1]))))*32.0; //roi_w = np.exp(tw) * anchors[2 * box + 
0] * 32.0 x[index + 2] = expf(x[index + 2])*d_anchors[2 * ((index / 25) % 5)] * 32.0; //roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0 x[index + 3] = expf(x[index + 3])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0; } } __global__ void sigmoid_exp(float* x, float* d_anchors, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x; if (index < size) { int cond = index % 25; switch (cond) { case 0: //center_x = (float(col) + sigmoid(tx)) * 32.0 x[index] = (((index / (125)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; break; case 1: //center_y = (float(row) + sigmoid(ty)) * 32.0 x[index] = ((index / (13 * 125)) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; break; case 2: //roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0 x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5)] * 32.0; break; case 3: //roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0 x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0; break; case 4: //confidence //if (index == 4)printf("data sample %f\n\n", x[index]); x[index] = (1.0 / (1 + expf(-1 * x[index]))); break; } //if (index <25)printf("data sample %d %f\n",index, x[index]); } } __global__ void scores(float* classes, float* confidence, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float x = confidence[index]; int step = index * 20; for (int i = 0; i < 20; i++) { classes[step + i] *= x; } } } __global__ void get_max_scores(float* classes, bool* boxes, float* maxScores, int* maxIndecies, int size = 13 * 13 * 5) { int index = blockIdx.y*gridDim.x + blockIdx.x; int classIndex = 20 * index; if (index < size) { if (boxes[index]) { float maxClassScore = classes[classIndex]; int maxClassIndex = 0; float tmp = 0; for (int i = classIndex + 1; i < classIndex + 19; i++) { tmp = classes[i]; if (tmp > maxClassScore) { maxClassScore = tmp; maxClassIndex = i - classIndex; } } //printf("from get_max_score %d %d\n", index,classIndex); maxScores[index] = maxClassScore; maxIndecies[index] = maxClassIndex; } } } __global__ void bool_arr(bool* d_boxes, int size, bool value = false) { int index = blockIdx.y*blockDim.x + blockIdx.x; if (index < size) { d_boxes[index] = value; } } __global__ void separate_data(float* predictions, float* boxes, float* confidence, float* classes, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { int x = index % 25; if (x > 4) { classes[(index / 25) * 20 + (x - 5)] = predictions[index]; } else if (x == 4) { confidence[(index / 25)] = predictions[index]; } else { //centers and bounding boxes boxes[(index / 25) * 4 + x] = predictions[index]; } } } //draw colored rectangles around objects //scale colors first //thickness = 4 pixels //size is WxH __global__ void draw(float* d_image, int x1, int y1, int x2, int y2, float r, float g, float b, int w, int h, int thickness = 4) { int index = blockIdx.y*gridDim.x + blockIdx.x; //scale for the three channels if (index < w*h) { //index *= 3; int xPos = (index / 3) % w; int yPos = (index / (3 * w)); //on the same vertical line //increase x axis if ((yPos == y1 || yPos == y2) && (xPos >= x1 && xPos <= x2)) { for (int i = 0; i < thickness; i++) { if (index < w*h) { //r d_image[index] = 0; //g d_image[index + 1] = 0; //b d_image[index + 2] = 0; //next column ie next x in image terminology as the row here is column there //remember image is at format NHWC index += 3; } } } else if ((xPos == x1 || xPos == x2) && (yPos >= y1 && yPos <= y2)) { for (int i = 0; i < thickness; i++) { if (index < w*h) { //r d_image[index] = 0; //g d_image[index + 
1] = 0; //b d_image[index + 2] = 0; } index += (3 * h); } } } } template<class T> void test(T* host_data, T* device_data, int start, int end) { cout << "host data \n\n"; for (int i = start; i < end; i++) { cout << host_data[i] << " "; } cout << "\n\n"; T* tmp = (T*)malloc(end * sizeof(T)); hipMemcpy(tmp, device_data, end * sizeof(T), hipMemcpyDeviceToHost); cout << "device data \n\n"; for (int i = start; i < end; i++) { cout << tmp[i] << " "; } cout << "\n\n"; } template<class T> void test(T* device_data, int start, int end) { T* tmp = (T*)malloc(end * sizeof(T)); cudaCheck(hipMemcpy(tmp, device_data, (end) * sizeof(T), hipMemcpyDeviceToHost)); cout << "device data \n\n"; for (int i = start; i < end; i++) { cout << tmp[i] << " "; } cout << "\n\n"; //if (tmp[3] == true)cout << "True \n"; } template<class T> void test(T* device_data, int row, int col, int w, int step, int channels, int times, string name, int offset = 0, bool xDirection = true) { cout << name << "\n"; for (int j = 0; j < times; j++) { test(device_data, (col*w*channels + row*channels + j*step + offset), (col*w*channels + row*channels + (j + 1)*step)); //cout << (col*step*channels + row*channels + j*step + offset) <<" "<< (col*step*channels + row*channels + (j + 1)*step) << "\n"; } } //--------------------------------------things to be done for optimization--------------------------------------------------- //to be more memory effecient delete the unneeded values and re assign them // this maybe time costy //test that //to be space effecient free workspace but make sure it doesn't include any data related to convolution //make sure when it crashes because of memory to print that //---------------------------------------------------------------------------------------------------------------------------- #define threadsPerBlock 32 #define shift 500 int main() { // Layer kernel stride output shape // -------------------------------------------- - //Input(416,416,3) // Convolution 33 1 (416, 416, 16) // MaxPooling 22 2 (208, 208, 16) // Convolution 33 1 (208, 208, 32) // MaxPooling 22 2 (104, 104, 32) // Convolution 33 1 (104, 104, 64) // MaxPooling 22 2 (52, 52, 64) // Convolution 33 1 (52, 52, 128) // MaxPooling 22 2 (26, 26, 128) // Convolution 33 1 (26, 26, 256) // MaxPooling 22 2 (13, 13, 256) // Convolution 33 1 (13, 13, 512) // MaxPooling 22 1 (13, 13, 512) // Convolution 33 1 (13, 13, 1024) // Convolution 33 1 (13, 13, 1024) // Convolution 11 1 (13, 13, 125) // -------------------------------------------- - //all MAX POOLING is valid padding except last one but padding = 0 //all CONV are SAME padding with p = 1 int imageH = 416, imageW = 416; float x = 1.0, y = 0.0; float* alpha = &x; float *beta = &y; long long totalSpace = 0; size_t space = 0; //std::cout << "ok\n"; cudnnHandle_t cudnn; cudnnCheck(cudnnCreate(&cudnn)); //input layer cudnnTensorDescriptor_t inputDes; cudnnCheck(cudnnCreateTensorDescriptor(&inputDes)); cudnnCheck(cudnnSetTensor4dDescriptor(inputDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 3, imageH, imageW)); //cv::Mat image = load_image("person.jpg"); //std::cout << "image loaded with dims " << image.cols << " X " << image.rows << "\n"; //for (int i = 0; i < 20; i++)std::cout << image.at<float>(cv::Point(0, i)) << " "; //std::cout << "\n\n"; float* d_input; hipMalloc(&d_input, imageH*imageW * 3 * sizeof(float)); totalSpace += imageH*imageW * 3 * sizeof(float); //load W1 float* w1 = (float*)malloc(16 * 3 * 3 * 3 * sizeof(float)); readWeights(w1, 16, 3, 3, 3, "conv1"); float* d_w1; 
cudaCheck(hipMalloc(&d_w1, 16 * 3 * 3 * 3 * sizeof(float))); totalSpace += 16 * 3 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(hipMemcpy(d_w1, w1, 16 * 3 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice)); //(416, 416, 16) float* d_conv1Out; cudaCheck(hipMalloc(&d_conv1Out, 16 * imageH * imageW * sizeof(float))); totalSpace += 16 * imageH * imageW * sizeof(float); //copy data to GPU //don't forget to add the biases float* b1 = (float*)malloc(16 * sizeof(float)); readWeights(b1, 16, 1, 1, 1, "conv1", false); float* d_b1; cudaCheck(hipMalloc(&d_b1, 16 * sizeof(float))); cudaCheck(hipMemcpy(d_b1, b1, 16 * sizeof(float), hipMemcpyHostToDevice)); float* d_max1Out; cudaCheck(hipMalloc(&d_max1Out, 208 * 208 * 16 * sizeof(float))); totalSpace += 208 * 208 * 16 * sizeof(float); //load W2 float* w2 = (float*)malloc(32 * 16 * 3 * 3 * sizeof(float)); readWeights(w2, 32, 16, 3, 3, "conv2"); float* d_w2; cudaCheck(hipMalloc(&d_w2, 32 * 16 * 3 * 3 * sizeof(float))); totalSpace += 32 * 16 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(hipMemcpy(d_w2, w2, 32 * 16 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice)); float* d_conv2Out; cudaCheck(hipMalloc(&d_conv2Out, 32 * 208 * 208 * sizeof(float))); totalSpace += 32 * 208 * 208 * sizeof(float); //don't forget to add the biases float* b2 = (float*)malloc(32 * sizeof(float)); readWeights(b2, 32, 1, 1, 1, "conv2", false); float* d_b2; cudaCheck(hipMalloc(&d_b2, 32 * sizeof(float))); cudaCheck(hipMemcpy(d_b2, b2, 32 * sizeof(float), hipMemcpyHostToDevice)); //load W3 float* w3 = (float*)malloc(64 * 32 * 3 * 3 * sizeof(float)); readWeights(w3, 64, 32, 3, 3, "conv3"); float* d_w3; hipMalloc(&d_w3, 64 * 32 * 3 * 3 * sizeof(float)); totalSpace += 64 * 32 * 3 * 3 * sizeof(float); //copy weights to GPU hipMemcpy(d_w3, w3, 64 * 32 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); float* b3 = (float*)malloc(64 * sizeof(float)); readWeights(b3, 64, 1, 1, 1, "conv3", false); float* d_b3; hipMalloc(&d_b3, 64 * sizeof(float)); hipMemcpy(d_b3, b3, 64 * sizeof(float), hipMemcpyHostToDevice); float* d_max3Out; hipMalloc(&d_max3Out, 52 * 52 * 64 * sizeof(float)); totalSpace += 52 * 52 * 64 * sizeof(float); //load W4 float* w4 = (float*)malloc(128 * 64 * 3 * 3 * sizeof(float)); readWeights(w4, 128, 64, 3, 3, "conv4"); float* d_w4; hipMalloc(&d_w4, 128 * 64 * 3 * 3 * sizeof(float)); totalSpace += 128 * 64 * 3 * 3 * sizeof(float); //copy weights to GPU hipMemcpy(d_w4, w4, 128 * 64 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); float* d_conv4Out; hipMalloc(&d_conv4Out, 128 * 52 * 52 * sizeof(float)); totalSpace += 128 * 52 * 52 * sizeof(float); float* b4 = (float*)malloc(128 * sizeof(float)); readWeights(b4, 128, 1, 1, 1, "conv4", false); float* d_b4; hipMalloc(&d_b4, 128 * sizeof(float)); hipMemcpy(d_b4, b4, 128 * sizeof(float), hipMemcpyHostToDevice); float* d_max4Out; hipMalloc(&d_max4Out, 26 * 26 * 128 * sizeof(float)); totalSpace += 26 * 26 * 128 * sizeof(float); //load W5 float* w5 = (float*)malloc(256 * 128 * 3 * 3 * sizeof(float)); readWeights(w5, 256, 128, 3, 3, "conv5"); float* d_w5; hipMalloc(&d_w5, 256 * 128 * 3 * 3 * sizeof(float)); totalSpace += 256 * 128 * 3 * 3 * sizeof(float); //copy weights to GPU hipMemcpy(d_w5, w5, 256 * 128 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); float* d_conv5Out; hipMalloc(&d_conv5Out, 256 * 26 * 26 * sizeof(float)); totalSpace += 256 * 26 * 26 * sizeof(float); float* b5 = (float*)malloc(256 * sizeof(float)); readWeights(b5, 256, 1, 1, 1, "conv5", false); float* d_b5; hipMalloc(&d_b5, 256 * sizeof(float)); 
hipMemcpy(d_b5, b5, 256 * sizeof(float), hipMemcpyHostToDevice); float* d_max5Out; hipMalloc(&d_max5Out, 13 * 13 * 256 * sizeof(float)); totalSpace += 13 * 13 * 256 * sizeof(float); //load W6 float* w6 = (float*)malloc(512 * 256 * 3 * 3 * sizeof(float)); readWeights(w6, 512, 256, 3, 3, "conv6"); float* d_w6; hipMalloc(&d_w6, 512 * 256 * 3 * 3 * sizeof(float)); totalSpace += 512 * 256 * 3 * 3 * sizeof(float); //copy weights to GPU hipMemcpy(d_w6, w6, 512 * 256 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); float* d_conv6Out; hipMalloc(&d_conv6Out, 512 * 13 * 13 * sizeof(float)); totalSpace += 512 * 13 * 13 * sizeof(float); float* b6 = (float*)malloc(512 * sizeof(float)); readWeights(b6, 512, 1, 1, 1, "conv6", false); float* d_b6; hipMalloc(&d_b6, 512 * sizeof(float)); hipMemcpy(d_b6, b6, 512 * sizeof(float), hipMemcpyHostToDevice); //here there's padding and stride 1 float* d_max6Out; hipMalloc(&d_max6Out, 13 * 13 * 512 * sizeof(float)); totalSpace += 13 * 13 * 512 * sizeof(float); //load W7 float* w7 = (float*)malloc(1024 * 512 * 3 * 3 * sizeof(float)); readWeights(w7, 1024, 512, 3, 3, "conv7"); float* d_w7; hipMalloc(&d_w7, 1024 * 512 * 3 * 3 * sizeof(float)); totalSpace += 1024 * 512 * 3 * 3 * sizeof(float); //copy weights to GPU hipMemcpy(d_w7, w7, 1024 * 512 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice); float* d_conv7Out; hipMalloc(&d_conv7Out, 1024 * 13 * 13 * sizeof(float)); totalSpace += 1024 * 13 * 13 * sizeof(float); float* b7 = (float*)malloc(1024 * sizeof(float)); readWeights(b7, 1024, 1, 1, 1, "conv7", false); float* d_b7; cudaCheck(hipMalloc(&d_b7, 1024 * sizeof(float))); cudaCheck(hipMemcpy(d_b7, b7, 1024 * sizeof(float), hipMemcpyHostToDevice)); //load W8 float* w8 = (float*)malloc(1024 * 1024 * 3 * 3 * sizeof(float)); readWeights(w8, 1024, 1024, 3, 3, "conv8", true); float* d_w8; cudaCheck(hipMalloc(&d_w8, 1024 * 1024 * 3 * 3 * sizeof(float))); totalSpace += 1024 * 1024 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(hipMemcpy(d_w8, w8, 1024 * 1024 * 3 * 3 * sizeof(float), hipMemcpyHostToDevice)); float* d_conv8Out; cudaCheck(hipMalloc(&d_conv8Out, 1024 * 13 * 13 * sizeof(float))); totalSpace += 1024 * 13 * 13 * sizeof(float); float* b8 = (float*)malloc(1024 * sizeof(float)); readWeights(b8, 1024, 1, 1, 1, "conv8", false); float* d_b8; cudaCheck(hipMalloc(&d_b8, 1024 * sizeof(float))); cudaCheck(hipMemcpy(d_b8, b8, 1024 * sizeof(float), hipMemcpyHostToDevice)); //load W9 float* w9 = (float*)malloc(1024 * 125 * sizeof(float)); readWeights(w9, 1024, 125, 3, 3, "conv9", true); float* d_w9; cudaCheck(hipMalloc(&d_w9, 1024 * 125 * sizeof(float))); totalSpace += 1024 * 125 * sizeof(float); float* d_conv9Out; cudaCheck(hipMalloc(&d_conv9Out, 125 * 13 * 13 * sizeof(float))); totalSpace += 125 * 13 * 13 * sizeof(float); cout << "total space " << totalSpace / (1024 * 1024) << " MB\n"; float b9[125]; readWeights(b9, 125, 1, 1, 1, "conv9", false); float* d_b9; cudaCheck(hipMalloc(&d_b9, 125 * sizeof(float))); float* d_classes_softmax; cudaCheck(hipMalloc(&d_classes_softmax, 13 * 13 * 5 * 20 * sizeof(float))); cv::Scalar colors[20] = { cv::Scalar(254.0, 254.0, 254),cv::Scalar(239.88888888888889, 211.66666666666669, 127), cv::Scalar(225.77777777777777, 169.33333333333334, 0), cv::Scalar(211.66666666666669, 127.0, 254), cv::Scalar(197.55555555555557, 84.66666666666667, 127), cv::Scalar(183.44444444444443, 42.33333333333332, 0), cv::Scalar(169.33333333333334, 0.0, 254), cv::Scalar(155.22222222222223, -42.33333333333335, 127), cv::Scalar(141.11111111111111, -84.66666666666664, 
0), cv::Scalar(127.0, 254.0, 254), cv::Scalar(112.88888888888889, 211.66666666666669, 127), cv::Scalar(98.77777777777777, 169.33333333333334, 0), cv::Scalar(84.66666666666667, 127.0, 254), cv::Scalar(70.55555555555556, 84.66666666666667, 127), cv::Scalar(56.44444444444444, 42.33333333333332, 0), cv::Scalar(42.33333333333332, 0.0, 254), cv::Scalar(28.222222222222236, -42.33333333333335, 127), cv::Scalar(14.111111111111118, -84.66666666666664, 0), cv::Scalar(0.0, 254.0, 254), cv::Scalar(-14.111111111111118, 211.66666666666669, 127) }; string classes[20] = { "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse" , "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor" }; //anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52] float h_anchors[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 }; float* d_anchors; cudaCheck(hipMalloc(&d_anchors, 10 * sizeof(float))); float* d_boxes_dims; cudaCheck(hipMalloc(&d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float))); float* d_predictions; cudaCheck(hipMalloc(&d_predictions, 13 * 13 * 5 * sizeof(float))); float* d_classes; cudaCheck(hipMalloc(&d_classes, 13 * 13 * 5 * 20 * sizeof(float))); cudaCheck(hipMemcpy(d_anchors, h_anchors, 10 * sizeof(float), hipMemcpyHostToDevice)); bool* d_boxes; cudaCheck(hipMalloc(&d_boxes, 13 * 13 * 5 * sizeof(bool))); float* d_maxScorePerBox; cudaCheck(hipMalloc(&d_maxScorePerBox, 13 * 13 * 5 * sizeof(float))); int* d_maxScoreIndex; cudaCheck(hipMalloc(&d_maxScoreIndex, 13 * 13 * 5 * sizeof(int))); float* d_points; cudaCheck(hipMalloc(&d_points, 13 * 13 * 5 * 4 * sizeof(float))); bool h_boxes[13 * 13 * 5]; float* h_points = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float)); float h_maxScorePerBox[13 * 13 * 5]; int h_maxScoreIndex[13 * 13 * 5]; float* h_boxes_dims = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float)); cudaCheck(hipMemcpy(d_b9, b9, 125 * sizeof(float), hipMemcpyHostToDevice)); //workspases void* workSpace[9] = { nullptr }; //(16X3X3X3) cudnnFilterDescriptor_t w1Des; cudnnCheck(cudnnCreateFilterDescriptor(&w1Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w1Des, CUDNN_DATA_FLOAT, 16, 3, 3, 3)); cudnnTensorDescriptor_t conv1OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv1OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv1OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 16, 416, 416)); //cout << "output format NHWC \n"; cudnnConvolutionDescriptor_t conv1Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv1Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv1Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv1Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, inputDes, w1Des, conv1Des, conv1OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv1Algo)); cudnnTensorDescriptor_t max1OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max1OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max1OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 16, 208, 208)); //cout << "max1 out NHWC\n"; cudnnPoolingDescriptor_t max1Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max1Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max1Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w2Des; cudnnCheck(cudnnCreateFilterDescriptor(&w2Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w2Des, CUDNN_DATA_FLOAT, 32, 16, 3, 3)); //(208, 208, 32) cudnnTensorDescriptor_t conv2OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv2OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv2OutDes, 
CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 32, 208, 208)); //cout << "conv2 out NHWC\n"; cudnnConvolutionDescriptor_t conv2Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv2Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv2Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv2Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max1OutDes, w2Des, conv2Des, conv2OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv2Algo)); float* d_max2Out; hipMalloc(&d_max2Out, 104 * 104 * 32 * sizeof(float)); totalSpace += 104 * 104 * 32 * sizeof(float); cudnnTensorDescriptor_t max2OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max2OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max2OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 32, 104, 104)); cudnnPoolingDescriptor_t max2Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max2Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max2Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); //[3,3,32,64] cudnnFilterDescriptor_t w3Des; cudnnCheck(cudnnCreateFilterDescriptor(&w3Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w3Des, CUDNN_DATA_FLOAT, 64, 32, 3, 3)); //(104, 104, 64) cudnnTensorDescriptor_t conv3OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv3OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv3OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 104, 104)); float* d_conv3Out; hipMalloc(&d_conv3Out, 64 * 104 * 104 * sizeof(float)); totalSpace += 64 * 104 * 104 * sizeof(float); cudnnConvolutionDescriptor_t conv3Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv3Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv3Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv3Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max2OutDes, w3Des, conv3Des, conv3OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv3Algo)); cudnnTensorDescriptor_t max3OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max3OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max3OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 52, 52)); cudnnPoolingDescriptor_t max3Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max3Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max3Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w4Des; cudnnCheck(cudnnCreateFilterDescriptor(&w4Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w4Des, CUDNN_DATA_FLOAT, 128, 64, 3, 3)); //(52, 52, 128) cudnnTensorDescriptor_t conv4OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv4OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv4OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 128, 52, 52)); cudnnConvolutionDescriptor_t conv4Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv4Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv4Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv4Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max3OutDes, w4Des, conv4Des, conv4OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv4Algo)); cudnnTensorDescriptor_t max4OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max4OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max4OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 128, 26, 26)); cudnnPoolingDescriptor_t max4Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max4Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max4Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); //[3,3,128,256] cudnnFilterDescriptor_t w5Des; cudnnCheck(cudnnCreateFilterDescriptor(&w5Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w5Des, CUDNN_DATA_FLOAT, 256, 128, 3, 3)); //(26, 26, 256) 
cudnnTensorDescriptor_t conv5OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv5OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv5OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 26, 26)); cudnnConvolutionDescriptor_t conv5Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv5Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv5Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv5Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max4OutDes, w5Des, conv5Des, conv5OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv5Algo)); cudnnTensorDescriptor_t max5OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max5OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max5OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 13, 13)); cudnnPoolingDescriptor_t max5Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max5Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max5Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w6Des; cudnnCheck(cudnnCreateFilterDescriptor(&w6Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w6Des, CUDNN_DATA_FLOAT, 512, 256, 3, 3)); //(13, 13, 512) cudnnTensorDescriptor_t conv6OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv6OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv6OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 512, 13, 13)); cudnnConvolutionDescriptor_t conv6Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv6Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv6Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv6Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max5OutDes, w6Des, conv6Des, conv6OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv6Algo)); cudnnTensorDescriptor_t max6OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max6OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max6OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 512, 13, 13)); cudnnPoolingDescriptor_t max6Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max6Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max6Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 1, 1)); cudnnFilterDescriptor_t w7Des; cudnnCheck(cudnnCreateFilterDescriptor(&w7Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w7Des, CUDNN_DATA_FLOAT, 1024, 512, 3, 3)); //(13 x 13 x 1024) cudnnTensorDescriptor_t conv7OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv7OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv7OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 13, 13)); cudnnConvolutionDescriptor_t conv7Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv7Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv7Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv7Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max6OutDes, w7Des, conv7Des, conv7OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv7Algo)); cudnnFilterDescriptor_t w8Des; cudnnCheck(cudnnCreateFilterDescriptor(&w8Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w8Des, CUDNN_DATA_FLOAT, 1024, 1024, 3, 3)); //(13 x 13 x 1024) cudnnTensorDescriptor_t conv8OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv8OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv8OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 13, 13)); cudnnConvolutionDescriptor_t conv8Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv8Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv8Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv8Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, conv7OutDes, w8Des, conv8Des, conv8OutDes, 
CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv8Algo)); //[1,1,1024,125] cudnnFilterDescriptor_t w9Des; cudnnCheck(cudnnCreateFilterDescriptor(&w9Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w9Des, CUDNN_DATA_FLOAT, 125, 1024, 1, 1)); //copy weights to GPU cudaCheck(hipMemcpy(d_w9, w9, 1024 * 125 * sizeof(float), hipMemcpyHostToDevice)); //(13 x 13 x 125) cudnnTensorDescriptor_t conv9OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv9OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv9OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 125, 13, 13)); cudnnConvolutionDescriptor_t conv9Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv9Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv9Des, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv9Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, conv8OutDes, w9Des, conv9Des, conv9OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv9Algo)); cudnnTensorDescriptor_t softmaxInputDes; cudnnCheck(cudnnCreateTensorDescriptor(&softmaxInputDes)); cudnnCheck(cudnnSetTensor4dDescriptor(softmaxInputDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 5, 20, 13, 13)); cudnnTensorDescriptor_t softmaxOutDes; cudnnCheck(cudnnCreateTensorDescriptor(&softmaxOutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(softmaxOutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 5, 20, 13, 13)); int numBlocks[8] = { ceil(sqrt((416 * 416 * 16) / shift)) ,ceil(sqrt((208 * 208 * 32) / shift)) , ceil(sqrt((104 * 104 * 64) / shift)) , ceil(sqrt((52 * 52 * 128) / shift)) , ceil(sqrt((26 * 26 * 256) / shift)) , ceil(sqrt((13 * 13 * 512) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) }; //-------------------------------------------------------START------------------------------------------ char* imagePaths[8] = { "dog.jpg","person.jpg","plane.jpg","motor.jpg","tv.jpg","horse.jpg" , "bus.jpg","bottle.jpg"}; cv::Mat image[8]; for (int i = 0; i < 8; i++) { image[i] = load_image(imagePaths[i]); } float* h_image = (float*)malloc(416 * 416 * 3 * sizeof(float)); for (int i = 0; i < 8; i++) { long t1 = clock(); hipMemcpy(d_input, image[i].ptr<float>(0), imageH*imageW * 3 * sizeof(float), hipMemcpyHostToDevice); std::cout << imagePaths[i] << "\n"; //--------------------------------------------------------conv1---------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, inputDes, w1Des, conv1Des, conv1OutDes, conv1Algo, &space)); if (i == 0) { cudaCheck(hipMalloc(&(workSpace[0]), space)); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, inputDes, d_input, w1Des, d_w1, conv1Des, conv1Algo, workSpace[0], space, beta, conv1OutDes, d_conv1Out)); add_biase << <dim3(416, 416), 1 >> >(d_conv1Out, d_b1, 416 * 416 * 16, 416 * 416); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 1------------------------------------------------------------------ //leaky_relu << <dim3(1665, 1665), 1 >> > (d_conv1Out, .1, 416 * 416 * 16); //int x = ceil(sqrt((416 * 416 * 16) / ( threadsPerBlock))); //std::cout << "x = " << x << "\n"; //leaky_relu_v2 << < dim3(x, x), threadsPerBlock >> > (d_conv1Out, .1, 416 * 416 * 16); //leaky_relu_v3 << <dim3(416,416),1 >> > (d_conv1Out, .1, 416 * 416 * 16, 416 * 416); leaky_relu_v4 << <dim3(numBlocks[0], numBlocks[0]), 1 >> > (d_conv1Out, .1, 416 * 416 * 16, shift); //----------------------------------------------------max 
1---------------------------------------------------------------- // MaxPooling 22 2 (208, 208, 16) cudnnCheck(cudnnPoolingForward(cudnn, max1Des, alpha, conv1OutDes, d_conv1Out, beta, max1OutDes, d_max1Out)); //--------------------------------------------------------conv2------------------------------------------------------------------- //[3,3,16,32] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max1OutDes, w2Des, conv2Des, conv2OutDes, conv2Algo, &space)); if (i == 0) { cudaCheck(hipMalloc(&workSpace[1], space)); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max1OutDes, d_max1Out, w2Des, d_w2, conv2Des, conv2Algo, workSpace[1], space, beta, conv2OutDes, d_conv2Out)); add_biase << <dim3(208, 208), 1 >> >(d_conv2Out, d_b2, 208 * 208 * 32, 208 * 208); // to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 2------------------------------------------------------------------ //(208, 208, 32) //leaky_relu << <dim3(1180, 1180), 1 >> > (d_conv2Out, .1, 208 * 208 * 32); //leaky_relu_v3 << <dim3(208,208),1 >> > (d_conv2Out, .1, 208 * 208 * 32, 208 * 208); leaky_relu_v4 << <dim3(numBlocks[1], numBlocks[1]), 1 >> > (d_conv2Out, .1, 208 * 208 * 32, shift); //----------------------------------------------------max 2---------------------------------------------------------------- //MaxPooling 22 2 (104, 104, 32) cudnnCheck(cudnnPoolingForward(cudnn, max2Des, alpha, conv2OutDes, d_conv2Out, beta, max2OutDes, d_max2Out)); //--------------------------------------------------------conv3------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max2OutDes, w3Des, conv3Des, conv3OutDes, conv3Algo, &space)); if (i == 0) { hipMalloc(&workSpace[2], space); totalSpace += space; } long m = clock(); cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max2OutDes, d_max2Out, w3Des, d_w3, conv3Des, conv3Algo, workSpace[2], space, beta, conv3OutDes, d_conv3Out)); cout << "time for conv 3 " << clock() - m << "\n"; //don't forget to add the biases add_biase << <dim3(104, 104), 1 >> >(d_conv3Out, d_b3, 104 * 104 * 64, 104 * 104); //-----------------------------------------------------relu 3------------------------------------------------------------------ ////(104, 104, 64) //leaky_relu << <dim3(835, 835), 1 >> > (d_conv3Out, .1, 104 * 104 * 64); //leaky_relu_v3 << <dim3(104, 104), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, 104 * 104); leaky_relu_v4 << <dim3(numBlocks[2], numBlocks[2]), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, shift); //----------------------------------------------------max 3---------------------------------------------------------------- //MaxPooling 22 2 (52, 52, 64) cudnnCheck(cudnnPoolingForward(cudnn, max3Des, alpha, conv3OutDes, d_conv3Out, beta, max3OutDes, d_max3Out)); //--------------------------------------------------------conv4------------------------------------------------------------------- //[3,3,64,128] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max3OutDes, w4Des, conv4Des, conv4OutDes, conv4Algo, &space)); if (i == 0) { hipMalloc(&workSpace[3], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max3OutDes, d_max3Out, w4Des, d_w4, conv4Des, conv4Algo, workSpace[3], space, beta, conv4OutDes, d_conv4Out)); //don't forget to add the biases //cout << "time for conv 2 " << clock() - m << "\n"; add_biase << <dim3(52, 52), 1 >> >(d_conv4Out, 
d_b4, 52 * 52 * 128, 52 * 52); //test(d_conv4Out, 0, 16); //test(d_conv4Out, 128, 128 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_conv4Out, 52 * 128, 52 * 128 + 16); //test(d_conv4Out, 52 * 128 + 128, 52 * 128 + 128 + 16); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 4------------------------------------------------------------------ ////(52, 52, 128) //leaky_relu << <dim3(600, 600), 1 >> > (d_conv4Out, .1, 52 * 52 * 128); //leaky_relu_v3 << <dim3(52, 52), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, 52 * 52); leaky_relu_v4 << <dim3(numBlocks[3], numBlocks[3]), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, shift); //----------------------------------------------------max 4---------------------------------------------------------------- //MaxPooling 22 2 (26, 26, 128) cudnnCheck(cudnnPoolingForward(cudnn, max4Des, alpha, conv4OutDes, d_conv4Out, beta, max4OutDes, d_max4Out)); //test(d_max4Out, 0, 16); //test(d_max4Out, 128, 128 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_max4Out, 26 * 128, 26 * 128 + 16); //test(d_max4Out, 26 * 128 + 128, 26 * 128 + 128 + 16); //--------------------------------------------------------conv5------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max4OutDes, w5Des, conv5Des, conv5OutDes, conv5Algo, &space)); if (i == 0) { hipMalloc(&workSpace[4], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max4OutDes, d_max4Out, w5Des, d_w5, conv5Des, conv5Algo, workSpace[4], space, beta, conv5OutDes, d_conv5Out)); //don't forget to add the biases add_biase << <dim3(28, 28), 1 >> >(d_conv5Out, d_b5, 26 * 26 * 256, 26 * 26); //test(d_conv5Out, 0, 16); //test(d_conv5Out, 256, 256 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_conv5Out, 26 * 256, 26 * 256 + 16); //test(d_conv5Out, 26 * 256 + 256, 26 * 256 + 256 + 16); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 5------------------------------------------------------------------ ////(26, 26, 256) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv5Out, .1, 26 * 26 * 256); //leaky_relu_v3 << <dim3(26, 26), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, 26 * 26); leaky_relu_v4 << <dim3(numBlocks[4], numBlocks[4]), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, shift); //----------------------------------------------------max 5---------------------------------------------------------------- //MaxPooling 22 2 (13, 13, 256) cudnnCheck(cudnnPoolingForward(cudnn, max5Des, alpha, conv5OutDes, d_conv5Out, beta, max5OutDes, d_max5Out)); //--------------------------------------------------------conv6------------------------------------------------------------------- //[3,3,256,512] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max5OutDes, w6Des, conv6Des, conv6OutDes, conv6Algo, &space)); if (i == 0) { hipMalloc(&workSpace[5], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max5OutDes, d_max5Out, w6Des, d_w6, conv6Des, conv6Algo, workSpace[5], space, beta, conv6OutDes, d_conv6Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv6Out, d_b6, 13 * 13 * 512, 13 * 13); //to be space effecient free workspace but make sure it 
doesn't include any data related to convolution //-----------------------------------------------------relu 6------------------------------------------------------------------ ////(13, 13, 512) //leaky_relu << <dim3(300, 300), 1 >> > (d_conv6Out, .1, 13 * 13 * 512); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, 13 * 13); leaky_relu_v4 << <dim3(numBlocks[5], numBlocks[5]), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, shift); //----------------------------------------------------max 6---------------------------------------------------------------- //MaxPooling 22 1 (13, 13, 512) cudnnCheck(cudnnPoolingForward(cudnn, max6Des, alpha, conv6OutDes, d_conv6Out, beta, max6OutDes, d_max6Out)); //--------------------------------------------------------conv7------------------------------------------------------------------- //[3,3,512,1024] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max6OutDes, w7Des, conv7Des, conv7OutDes, conv7Algo, &space)); if (i == 0) { hipMalloc(&workSpace[6], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max6OutDes, d_max6Out, w7Des, d_w7, conv7Des, conv7Algo, workSpace[6], space, beta, conv7OutDes, d_conv7Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv7Out, d_b7, 13 * 13 * 1024, 13 * 13); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 7------------------------------------------------------------------ ////(13 x 13 x 1024) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, 13 * 13); leaky_relu_v4 << <dim3(numBlocks[6], numBlocks[6]), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, shift); //--------------------------------------------------------conv8------------------------------------------------------------------- //[3,3,1024,1024] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, conv7OutDes, w8Des, conv8Des, conv8OutDes, conv8Algo, &space)); if (i == 0) { hipMalloc(&workSpace[7], space); totalSpace += space; } //cout << "total space " << totalSpace/(1024*1024) << " MB\n"; cudnnCheck(cudnnConvolutionForward(cudnn, alpha, conv7OutDes, d_conv7Out, w8Des, d_w8, conv8Des, conv8Algo, workSpace[7], space, beta, conv8OutDes, d_conv8Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv8Out, d_b8, 13 * 13 * 1024, 13 * 13); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 8------------------------------------------------------------------ ////(13 x 13 x 1024) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, 13 * 13); //x = ceil(sqrt((13 * 13 * 1024) / shift)); leaky_relu_v4 << <dim3(numBlocks[7], numBlocks[7]), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, shift); //--------------------------------------------------------conv9------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, conv8OutDes, w9Des, conv9Des, conv9OutDes, conv9Algo, &space)); if (i == 0) { hipMalloc(&workSpace[8], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, conv8OutDes, d_conv8Out, w9Des, d_w9, conv9Des, conv9Algo, workSpace[8], space, beta, conv9OutDes, 
d_conv9Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv9Out, d_b9, 13 * 13 * 125, 13 * 13); //another optimization separate first then sigmoid exp use the predefined ones sigmoid_exp << <dim3(150, 150), 1 >> > (d_conv9Out, d_anchors, 13 * 13 * 125); separate_data << <dim3(150, 150), 1 >> > (d_conv9Out, d_boxes_dims, d_predictions, d_classes, 13 * 13 * 125); cudnnCheck(cudnnSoftmaxForward(cudnn, CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, alpha, softmaxInputDes, d_classes, beta, softmaxOutDes, d_classes_softmax)); scores << <dim3(32, 32), 1 >> > (d_classes_softmax, d_predictions, 13 * 13 * 5); bool_arr << <dim3(30, 30), 1 >> >(d_boxes, 13 * 13 * 5, false); filter << < dim3(150, 150), 1 >> > (d_classes_softmax, d_boxes, 0.3, 13 * 13 * 5 * 20); get_max_scores << <dim3(30, 30), 1 >> > (d_classes_softmax, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 13 * 13 * 5); calculate_points << <dim3(30, 30), 1 >> > (d_boxes_dims, d_points, d_boxes, 13 * 13 * 5); //hipDeviceSynchronize(); non_max_supression << < dim3(30, 30), 1 >> > (d_points, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 0.3, 13 * 13 * 5); cudaCheck(hipMemcpy(h_boxes, d_boxes, 13 * 13 * 5 * sizeof(bool), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(h_maxScorePerBox, d_maxScorePerBox, 13 * 13 * 5 * sizeof(float), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(h_maxScoreIndex, d_maxScoreIndex, 13 * 13 * 5 * sizeof(int), hipMemcpyDeviceToHost)); //cudaCheck(hipMemcpy(h_boxes_dims, d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float), hipMemcpyDeviceToHost)); cudaCheck(hipMemcpy(h_points, d_points, 13 * 13 * 5 * 4 * sizeof(float), hipMemcpyDeviceToHost)); cv::Mat output(416, 416, CV_8UC3); cv::normalize(image[i], output, 0.0, 255.0, cv::NORM_MINMAX); for (int i = 0; i < 13 * 13 * 5; i++) { if (h_boxes[i]) { int index = i * 4; int left = h_points[index]; int top = h_points[index + 1]; int right = h_points[index + 2]; int bottom = h_points[index + 3]; float confidence = h_maxScorePerBox[i]; string className = classes[h_maxScoreIndex[i]]; std::cout << "( " << left << " , " << top << " ) , (" << right << " , " << bottom << " ) class " << className << " with prop " << confidence << "\n"; //threashold boxes left = (left <= 416) ? left : 416; top = (top <= 416) ? top : 416; right = (right <= 416) ? right : 416; bottom = (bottom <= 416) ? bottom : 416; cv::rectangle(output, cv::Point(left, top), cv::Point(right, bottom), colors[h_maxScoreIndex[i]], 3); //draw << <dim3(416, 416), 1 >> > (d_input, left, top, right, bottom, colors[h_maxScoreIndex[i]].val[0], //colors[h_maxScoreIndex[i]].val[1], colors[h_maxScoreIndex[i]].val[2], 416, 416); } } cudaCheck(hipMemcpy(h_image, d_input, 416 * 416 * 3 * sizeof(float), hipMemcpyDeviceToHost)); //cv::Mat output0(416, 416, CV_32FC3,h_image); //cv::normalize(output0, output, 0.0, 255.0, cv::NORM_MINMAX); //cv::cvtColor(output, output, CV_RGB2BGR); //cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX); long t2 = clock(); cout << "time = " << t2 - t1 << "\n"; string num = std::to_string(i); string file = "output" + num + ".png"; save_image(file.c_str(), output); } //to be space effecient free workspace but make sure it doesn't include any data related to convolution cout << "total space " << totalSpace / (1024 * 1024) << "MB\n"; }
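A note on the leaky-ReLU kernels used throughout the forward pass above: leaky_relu_v4 launches a 2-D grid of single-thread blocks and lets each block sweep a fixed shift-sized chunk, and its guard (index < size - shift) appears to skip the tail elements whenever the last chunk starts at or beyond size - shift. The sketch below is a hedged alternative, not the code of the file above: a conventional grid-stride kernel that covers every element with many threads per block. The kernel name, block size, and launch shown are illustrative assumptions, not taken from the original.

// Hedged sketch (not part of the file above): a grid-stride leaky ReLU.
// Assumes only a flat float array of length `size`; the block size of 256
// and the grid size below are illustrative choices.
__global__ void leaky_relu_grid_stride(float* data, float alpha, int size)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < size;
         i += gridDim.x * blockDim.x)
    {
        float x = data[i];
        if (x < 0.0f) data[i] = alpha * x;   // leak negative activations
    }
}

// Possible launch for the first activation map (416 x 416 x 16 floats):
// leaky_relu_grid_stride<<<(416 * 416 * 16 + 255) / 256, 256>>>(d_conv1Out, 0.1f, 416 * 416 * 16);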
7b59293c8b2d54608249f7be3e6b1fd143e95027.cu
//this version is 233 ms #include "cuda_runtime.h" #include "device_launch_parameters.h" #include<iostream> #include<cudnn.h> #include <stdio.h> #include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include<cublas_v2.h> #include<string> #include<fstream> #include<cmath> #include<ctime> using namespace std; void readWeights(float* weights, int m/*output*/, int n/*input*/, int h, int w, string baseFileName, bool readWeights = true) { string fileName = "weights2/" + baseFileName; if (readWeights) { fileName += "Weights.data"; } else { fileName += "Biases.data"; } ifstream in(fileName, ios::in | ios::binary); //cout << fileName << "\n"; if (!in.is_open()) { cout << "file " << baseFileName << " didn't open \n"; return; } in.read((char*)weights, m*n*h*w * sizeof(float)); in.close(); //cout << baseFileName << " : " << weights[0] << " " << weights[1] << "\n"; } #define cudnnCheck(exp){\ cudnnStatus_t status=(exp);\ if(status!=CUDNN_STATUS_SUCCESS){\ std::cout<<"Error at line "<<__LINE__<<" "<<cudnnGetErrorString(status)<<"\n";\ std::exit(EXIT_FAILURE);\ }\ }\ #define cudaCheck(exp) {\ cudaError_t status=(exp);\ if(status!=cudaSuccess){\ cerr<<"error at cuda "<<__LINE__<<" "<<cudaGetErrorString(status)<<"\n";\ exit(EXIT_FAILURE);\ }\ }\ cv::Mat load_image(const char* image_path) { cv::Mat image = cv::imread(image_path, CV_LOAD_IMAGE_COLOR); if (image.empty()) { cerr << "couldn't open image\n"; } cv::cvtColor(image, image, cv::COLOR_BGR2RGB); image.convertTo(image, CV_32FC3); cv::normalize(image, image, 0, 1, cv::NORM_MINMAX); cv::Mat resizedImage(416, 416, CV_32FC2); cv::resize(image, resizedImage, cv::Size(416, 416), 0, 0, cv::INTER_CUBIC); if (resizedImage.empty())cerr << "resized image empty\n"; //cout << "ok\n"; return resizedImage; } void save_image(const char* output_filename, cv::Mat output_image) { //cv::cvtColor(output_image, output_image, cv::COLOR_RGB2BGR); //cv::normalize(output_image, output_image, 0.0, 255.0, cv::NORM_MINMAX); //output_image.convertTo(output_image, CV_8UC3); cv::imwrite(output_filename, output_image); } //incomplete __global__ void leaky_relu_v2(float* d_data, float alpha, int size) { int index = (blockIdx.y*gridDim.x + blockIdx.x); if (index < size) { float x = d_data[index]; if (x<0) d_data[index] = alpha*x; } } //try constant shift __global__ void leaky_relu_v3(float* d_data, float alpha, int size, int step) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < step) { int channels = (size / step); index *= channels; for (int i = index; i < index + channels; i++) { float x = d_data[i]; if (x<0) d_data[i] = alpha*x; } } } __global__ void leaky_relu_v4(float* d_data, float alpha, int size, int shift) { int index = blockIdx.y*gridDim.x + blockIdx.x; index *= shift; if (index < size - shift) { for (int i = index; i < index + shift; i++) { float x = d_data[i]; if (x<0) d_data[i] = alpha*x; } } } __global__ void leaky_relu(float* d_data, float alpha, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float x = d_data[index]; if (x<0) d_data[index] = alpha*x; } } //step is width*height of the output of convolution /* @param size is width x height x channels @Param step is width x height the data in the format HxWxC k is computed as index%(size/step) */ __global__ void add_biase(float* d_data, float* biases, int size/*WxHxC*/, int step/*WxH*/) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < step) { int biaseSize = (size / step); index *= biaseSize; for (int i = 0; i < 
biaseSize; i++) { d_data[index + i] += biases[i]; } } } __device__ float iou(float bx1x1, float bx1y1, float bx1x2, float bx1y2, float bx2x1, float bx2y1, float bx2x2, float bx2y2) { float x1 = (bx1x1 > bx2x1) ? bx1x1 : bx2x1; float y1 = (bx1y1> bx2y1) ? bx1y1 : bx2y1; float x2 = (bx1x2 > bx2x2) ? bx2x2 : bx1x2; float y2 = (bx1y2 > bx2y2) ? bx2y2 : bx1y2; float A1 = (bx1x2 - bx1x1)*(bx1y2 - bx1y1); float A2 = (bx2x2 - bx2x1)*(bx2y2 - bx2y1); float A_inter = ((x2 - x1) > 0 ? (x2 - x1) : 0)*((y2 - y1) > 0 ? (y2 - y1) : 0); return(A_inter / (A1 + A2 - A_inter)); } //consider calculating the necessary points only __global__ void calculate_points(float* boxes_dims, float* points, bool* boxes, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { //int left = h_boxes_dims[index] - (h_boxes_dims[index + 2] / 2.0); //int right = h_boxes_dims[index] + (h_boxes_dims[index + 2] / 2.0); //int top = h_boxes_dims[index + 1] - (h_boxes_dims[index + 3] / 2.0); //int bottom = h_boxes_dims[index + 1] + (h_boxes_dims[index + 3] / 2.0); int step = index * 4; float center_x = boxes_dims[step]; float w = boxes_dims[step + 2]; float center_y = boxes_dims[step + 1]; float h = boxes_dims[step + 3]; points[step] = center_x - ((w) / 2.0); points[step + 2] = center_x + ((w) / 2.0); points[step + 1] = center_y - ((h) / 2.0); points[step + 3] = center_y + ((h) / 2.0); } } __global__ void non_max_supression(float* points, bool* boxes, float* maxClassScore, int* maxClassIndex, float threashold = 0.3, int size = 13 * 13 * 5) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float maxClass = maxClassScore[index]; if (maxClass < 0.3) { boxes[index] = false; return; } int maxClassInd = maxClassIndex[index]; float x1 = points[index * 4]; float y1 = points[index * 4 + 1]; float x2 = points[index * 4 + 2]; float y2 = points[index * 4 + 3]; for (int i = 0; i < size; i++) { if (boxes[i] && i != index) { if (maxClassInd == maxClassIndex[i]) { if (maxClass > maxClassScore[i]) { float x = iou(x1, y1, x2, y2, points[i * 4] , points[i * 4 + 1], points[i * 4 + 2], points[i * 4 + 3]); if (x >= threashold) { boxes[i] = false; } } } } } } } //20 classes __global__ void exp(float* classes, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x; if (index<size) { classes[index] = exp(classes[index]); } } __global__ void softmax(float* classes, int offset, float sum) { if (threadIdx.x < 20) { classes[threadIdx.x + offset] /= sum; } } __global__ void filter(float* classes, bool* boxes, float threshold = 0.4, int size = 13 * 13 * 5 * 20) { int index = (blockIdx.y*gridDim.x) + blockIdx.x; if (index < size) { if (classes[index] >= threshold) { boxes[index / 20] = true; //printf("index %d value %f\n", index, classes[index]); } } } //blocks*threads __global__ void sigmoid(float* x, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x + threadIdx.x; if (index<size) { x[index] = 1 / (1 + exp(-1 * x[index])); } } //calculate centers of the box and the width and height //calculate the necessary ones __global__ void calculate_box_dims(float* x, float* d_anchors, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { //center_x = (float(col) + sigmoid(tx)) * 32.0 x[index] = (((index / (4)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; //center_y = (float(row) + sigmoid(ty)) * 32.0 x[index + 1] = ((index / (13 * 4)) + (1.0 / (1 + expf(-1 * x[index + 1]))))*32.0; //roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0 x[index + 2] = expf(x[index + 2])*d_anchors[2 * 
((index / 25) % 5)] * 32.0; //roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0 x[index + 3] = expf(x[index + 3])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0; } } __global__ void sigmoid_exp(float* x, float* d_anchors, int size) { int index = (blockIdx.y*gridDim.x) + blockIdx.x; if (index < size) { int cond = index % 25; switch (cond) { case 0: //center_x = (float(col) + sigmoid(tx)) * 32.0 x[index] = (((index / (125)) % 13) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; break; case 1: //center_y = (float(row) + sigmoid(ty)) * 32.0 x[index] = ((index / (13 * 125)) + (1.0 / (1 + expf(-1 * x[index]))))*32.0; break; case 2: //roi_w = np.exp(tw) * anchors[2 * box + 0] * 32.0 x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5)] * 32.0; break; case 3: //roi_h = np.exp(th) * anchors[2 * box + 1] * 32.0 x[index] = expf(x[index])*d_anchors[2 * ((index / 25) % 5) + 1] * 32.0; break; case 4: //confidence //if (index == 4)printf("data sample %f\n\n", x[index]); x[index] = (1.0 / (1 + expf(-1 * x[index]))); break; } //if (index <25)printf("data sample %d %f\n",index, x[index]); } } __global__ void scores(float* classes, float* confidence, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { float x = confidence[index]; int step = index * 20; for (int i = 0; i < 20; i++) { classes[step + i] *= x; } } } __global__ void get_max_scores(float* classes, bool* boxes, float* maxScores, int* maxIndecies, int size = 13 * 13 * 5) { int index = blockIdx.y*gridDim.x + blockIdx.x; int classIndex = 20 * index; if (index < size) { if (boxes[index]) { float maxClassScore = classes[classIndex]; int maxClassIndex = 0; float tmp = 0; for (int i = classIndex + 1; i < classIndex + 19; i++) { tmp = classes[i]; if (tmp > maxClassScore) { maxClassScore = tmp; maxClassIndex = i - classIndex; } } //printf("from get_max_score %d %d\n", index,classIndex); maxScores[index] = maxClassScore; maxIndecies[index] = maxClassIndex; } } } __global__ void bool_arr(bool* d_boxes, int size, bool value = false) { int index = blockIdx.y*blockDim.x + blockIdx.x; if (index < size) { d_boxes[index] = value; } } __global__ void separate_data(float* predictions, float* boxes, float* confidence, float* classes, int size) { int index = blockIdx.y*gridDim.x + blockIdx.x; if (index < size) { int x = index % 25; if (x > 4) { classes[(index / 25) * 20 + (x - 5)] = predictions[index]; } else if (x == 4) { confidence[(index / 25)] = predictions[index]; } else { //centers and bounding boxes boxes[(index / 25) * 4 + x] = predictions[index]; } } } //draw colored rectangles around objects //scale colors first //thickness = 4 pixels //size is WxH __global__ void draw(float* d_image, int x1, int y1, int x2, int y2, float r, float g, float b, int w, int h, int thickness = 4) { int index = blockIdx.y*gridDim.x + blockIdx.x; //scale for the three channels if (index < w*h) { //index *= 3; int xPos = (index / 3) % w; int yPos = (index / (3 * w)); //on the same vertical line //increase x axis if ((yPos == y1 || yPos == y2) && (xPos >= x1 && xPos <= x2)) { for (int i = 0; i < thickness; i++) { if (index < w*h) { //r d_image[index] = 0; //g d_image[index + 1] = 0; //b d_image[index + 2] = 0; //next column ie next x in image terminology as the row here is column there //remember image is at format NHWC index += 3; } } } else if ((xPos == x1 || xPos == x2) && (yPos >= y1 && yPos <= y2)) { for (int i = 0; i < thickness; i++) { if (index < w*h) { //r d_image[index] = 0; //g d_image[index + 1] = 0; //b d_image[index + 2] = 0; } index += (3 * h); } 
} } } template<class T> void test(T* host_data, T* device_data, int start, int end) { cout << "host data \n\n"; for (int i = start; i < end; i++) { cout << host_data[i] << " "; } cout << "\n\n"; T* tmp = (T*)malloc(end * sizeof(T)); cudaMemcpy(tmp, device_data, end * sizeof(T), cudaMemcpyDeviceToHost); cout << "device data \n\n"; for (int i = start; i < end; i++) { cout << tmp[i] << " "; } cout << "\n\n"; } template<class T> void test(T* device_data, int start, int end) { T* tmp = (T*)malloc(end * sizeof(T)); cudaCheck(cudaMemcpy(tmp, device_data, (end) * sizeof(T), cudaMemcpyDeviceToHost)); cout << "device data \n\n"; for (int i = start; i < end; i++) { cout << tmp[i] << " "; } cout << "\n\n"; //if (tmp[3] == true)cout << "True \n"; } template<class T> void test(T* device_data, int row, int col, int w, int step, int channels, int times, string name, int offset = 0, bool xDirection = true) { cout << name << "\n"; for (int j = 0; j < times; j++) { test(device_data, (col*w*channels + row*channels + j*step + offset), (col*w*channels + row*channels + (j + 1)*step)); //cout << (col*step*channels + row*channels + j*step + offset) <<" "<< (col*step*channels + row*channels + (j + 1)*step) << "\n"; } } //--------------------------------------things to be done for optimization--------------------------------------------------- //to be more memory effecient delete the unneeded values and re assign them // this maybe time costy //test that //to be space effecient free workspace but make sure it doesn't include any data related to convolution //make sure when it crashes because of memory to print that //---------------------------------------------------------------------------------------------------------------------------- #define threadsPerBlock 32 #define shift 500 int main() { // Layer kernel stride output shape // -------------------------------------------- - //Input(416,416,3) // Convolution 3×3 1 (416, 416, 16) // MaxPooling 2×2 2 (208, 208, 16) // Convolution 3×3 1 (208, 208, 32) // MaxPooling 2×2 2 (104, 104, 32) // Convolution 3×3 1 (104, 104, 64) // MaxPooling 2×2 2 (52, 52, 64) // Convolution 3×3 1 (52, 52, 128) // MaxPooling 2×2 2 (26, 26, 128) // Convolution 3×3 1 (26, 26, 256) // MaxPooling 2×2 2 (13, 13, 256) // Convolution 3×3 1 (13, 13, 512) // MaxPooling 2×2 1 (13, 13, 512) // Convolution 3×3 1 (13, 13, 1024) // Convolution 3×3 1 (13, 13, 1024) // Convolution 1×1 1 (13, 13, 125) // -------------------------------------------- - //all MAX POOLING is valid padding except last one but padding = 0 //all CONV are SAME padding with p = 1 int imageH = 416, imageW = 416; float x = 1.0, y = 0.0; float* alpha = &x; float *beta = &y; long long totalSpace = 0; size_t space = 0; //std::cout << "ok\n"; cudnnHandle_t cudnn; cudnnCheck(cudnnCreate(&cudnn)); //input layer cudnnTensorDescriptor_t inputDes; cudnnCheck(cudnnCreateTensorDescriptor(&inputDes)); cudnnCheck(cudnnSetTensor4dDescriptor(inputDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 3, imageH, imageW)); //cv::Mat image = load_image("person.jpg"); //std::cout << "image loaded with dims " << image.cols << " X " << image.rows << "\n"; //for (int i = 0; i < 20; i++)std::cout << image.at<float>(cv::Point(0, i)) << " "; //std::cout << "\n\n"; float* d_input; cudaMalloc(&d_input, imageH*imageW * 3 * sizeof(float)); totalSpace += imageH*imageW * 3 * sizeof(float); //load W1 float* w1 = (float*)malloc(16 * 3 * 3 * 3 * sizeof(float)); readWeights(w1, 16, 3, 3, 3, "conv1"); float* d_w1; cudaCheck(cudaMalloc(&d_w1, 16 * 3 * 3 * 3 * sizeof(float))); 
totalSpace += 16 * 3 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(cudaMemcpy(d_w1, w1, 16 * 3 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice)); //(416, 416, 16) float* d_conv1Out; cudaCheck(cudaMalloc(&d_conv1Out, 16 * imageH * imageW * sizeof(float))); totalSpace += 16 * imageH * imageW * sizeof(float); //copy data to GPU //don't forget to add the biases float* b1 = (float*)malloc(16 * sizeof(float)); readWeights(b1, 16, 1, 1, 1, "conv1", false); float* d_b1; cudaCheck(cudaMalloc(&d_b1, 16 * sizeof(float))); cudaCheck(cudaMemcpy(d_b1, b1, 16 * sizeof(float), cudaMemcpyHostToDevice)); float* d_max1Out; cudaCheck(cudaMalloc(&d_max1Out, 208 * 208 * 16 * sizeof(float))); totalSpace += 208 * 208 * 16 * sizeof(float); //load W2 float* w2 = (float*)malloc(32 * 16 * 3 * 3 * sizeof(float)); readWeights(w2, 32, 16, 3, 3, "conv2"); float* d_w2; cudaCheck(cudaMalloc(&d_w2, 32 * 16 * 3 * 3 * sizeof(float))); totalSpace += 32 * 16 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(cudaMemcpy(d_w2, w2, 32 * 16 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice)); float* d_conv2Out; cudaCheck(cudaMalloc(&d_conv2Out, 32 * 208 * 208 * sizeof(float))); totalSpace += 32 * 208 * 208 * sizeof(float); //don't forget to add the biases float* b2 = (float*)malloc(32 * sizeof(float)); readWeights(b2, 32, 1, 1, 1, "conv2", false); float* d_b2; cudaCheck(cudaMalloc(&d_b2, 32 * sizeof(float))); cudaCheck(cudaMemcpy(d_b2, b2, 32 * sizeof(float), cudaMemcpyHostToDevice)); //load W3 float* w3 = (float*)malloc(64 * 32 * 3 * 3 * sizeof(float)); readWeights(w3, 64, 32, 3, 3, "conv3"); float* d_w3; cudaMalloc(&d_w3, 64 * 32 * 3 * 3 * sizeof(float)); totalSpace += 64 * 32 * 3 * 3 * sizeof(float); //copy weights to GPU cudaMemcpy(d_w3, w3, 64 * 32 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); float* b3 = (float*)malloc(64 * sizeof(float)); readWeights(b3, 64, 1, 1, 1, "conv3", false); float* d_b3; cudaMalloc(&d_b3, 64 * sizeof(float)); cudaMemcpy(d_b3, b3, 64 * sizeof(float), cudaMemcpyHostToDevice); float* d_max3Out; cudaMalloc(&d_max3Out, 52 * 52 * 64 * sizeof(float)); totalSpace += 52 * 52 * 64 * sizeof(float); //load W4 float* w4 = (float*)malloc(128 * 64 * 3 * 3 * sizeof(float)); readWeights(w4, 128, 64, 3, 3, "conv4"); float* d_w4; cudaMalloc(&d_w4, 128 * 64 * 3 * 3 * sizeof(float)); totalSpace += 128 * 64 * 3 * 3 * sizeof(float); //copy weights to GPU cudaMemcpy(d_w4, w4, 128 * 64 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); float* d_conv4Out; cudaMalloc(&d_conv4Out, 128 * 52 * 52 * sizeof(float)); totalSpace += 128 * 52 * 52 * sizeof(float); float* b4 = (float*)malloc(128 * sizeof(float)); readWeights(b4, 128, 1, 1, 1, "conv4", false); float* d_b4; cudaMalloc(&d_b4, 128 * sizeof(float)); cudaMemcpy(d_b4, b4, 128 * sizeof(float), cudaMemcpyHostToDevice); float* d_max4Out; cudaMalloc(&d_max4Out, 26 * 26 * 128 * sizeof(float)); totalSpace += 26 * 26 * 128 * sizeof(float); //load W5 float* w5 = (float*)malloc(256 * 128 * 3 * 3 * sizeof(float)); readWeights(w5, 256, 128, 3, 3, "conv5"); float* d_w5; cudaMalloc(&d_w5, 256 * 128 * 3 * 3 * sizeof(float)); totalSpace += 256 * 128 * 3 * 3 * sizeof(float); //copy weights to GPU cudaMemcpy(d_w5, w5, 256 * 128 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); float* d_conv5Out; cudaMalloc(&d_conv5Out, 256 * 26 * 26 * sizeof(float)); totalSpace += 256 * 26 * 26 * sizeof(float); float* b5 = (float*)malloc(256 * sizeof(float)); readWeights(b5, 256, 1, 1, 1, "conv5", false); float* d_b5; cudaMalloc(&d_b5, 256 * sizeof(float)); cudaMemcpy(d_b5, b5, 256 * 
sizeof(float), cudaMemcpyHostToDevice); float* d_max5Out; cudaMalloc(&d_max5Out, 13 * 13 * 256 * sizeof(float)); totalSpace += 13 * 13 * 256 * sizeof(float); //load W6 float* w6 = (float*)malloc(512 * 256 * 3 * 3 * sizeof(float)); readWeights(w6, 512, 256, 3, 3, "conv6"); float* d_w6; cudaMalloc(&d_w6, 512 * 256 * 3 * 3 * sizeof(float)); totalSpace += 512 * 256 * 3 * 3 * sizeof(float); //copy weights to GPU cudaMemcpy(d_w6, w6, 512 * 256 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); float* d_conv6Out; cudaMalloc(&d_conv6Out, 512 * 13 * 13 * sizeof(float)); totalSpace += 512 * 13 * 13 * sizeof(float); float* b6 = (float*)malloc(512 * sizeof(float)); readWeights(b6, 512, 1, 1, 1, "conv6", false); float* d_b6; cudaMalloc(&d_b6, 512 * sizeof(float)); cudaMemcpy(d_b6, b6, 512 * sizeof(float), cudaMemcpyHostToDevice); //here there's padding and stride 1 float* d_max6Out; cudaMalloc(&d_max6Out, 13 * 13 * 512 * sizeof(float)); totalSpace += 13 * 13 * 512 * sizeof(float); //load W7 float* w7 = (float*)malloc(1024 * 512 * 3 * 3 * sizeof(float)); readWeights(w7, 1024, 512, 3, 3, "conv7"); float* d_w7; cudaMalloc(&d_w7, 1024 * 512 * 3 * 3 * sizeof(float)); totalSpace += 1024 * 512 * 3 * 3 * sizeof(float); //copy weights to GPU cudaMemcpy(d_w7, w7, 1024 * 512 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice); float* d_conv7Out; cudaMalloc(&d_conv7Out, 1024 * 13 * 13 * sizeof(float)); totalSpace += 1024 * 13 * 13 * sizeof(float); float* b7 = (float*)malloc(1024 * sizeof(float)); readWeights(b7, 1024, 1, 1, 1, "conv7", false); float* d_b7; cudaCheck(cudaMalloc(&d_b7, 1024 * sizeof(float))); cudaCheck(cudaMemcpy(d_b7, b7, 1024 * sizeof(float), cudaMemcpyHostToDevice)); //load W8 float* w8 = (float*)malloc(1024 * 1024 * 3 * 3 * sizeof(float)); readWeights(w8, 1024, 1024, 3, 3, "conv8", true); float* d_w8; cudaCheck(cudaMalloc(&d_w8, 1024 * 1024 * 3 * 3 * sizeof(float))); totalSpace += 1024 * 1024 * 3 * 3 * sizeof(float); //copy weights to GPU cudaCheck(cudaMemcpy(d_w8, w8, 1024 * 1024 * 3 * 3 * sizeof(float), cudaMemcpyHostToDevice)); float* d_conv8Out; cudaCheck(cudaMalloc(&d_conv8Out, 1024 * 13 * 13 * sizeof(float))); totalSpace += 1024 * 13 * 13 * sizeof(float); float* b8 = (float*)malloc(1024 * sizeof(float)); readWeights(b8, 1024, 1, 1, 1, "conv8", false); float* d_b8; cudaCheck(cudaMalloc(&d_b8, 1024 * sizeof(float))); cudaCheck(cudaMemcpy(d_b8, b8, 1024 * sizeof(float), cudaMemcpyHostToDevice)); //load W9 float* w9 = (float*)malloc(1024 * 125 * sizeof(float)); readWeights(w9, 1024, 125, 3, 3, "conv9", true); float* d_w9; cudaCheck(cudaMalloc(&d_w9, 1024 * 125 * sizeof(float))); totalSpace += 1024 * 125 * sizeof(float); float* d_conv9Out; cudaCheck(cudaMalloc(&d_conv9Out, 125 * 13 * 13 * sizeof(float))); totalSpace += 125 * 13 * 13 * sizeof(float); cout << "total space " << totalSpace / (1024 * 1024) << " MB\n"; float b9[125]; readWeights(b9, 125, 1, 1, 1, "conv9", false); float* d_b9; cudaCheck(cudaMalloc(&d_b9, 125 * sizeof(float))); float* d_classes_softmax; cudaCheck(cudaMalloc(&d_classes_softmax, 13 * 13 * 5 * 20 * sizeof(float))); cv::Scalar colors[20] = { cv::Scalar(254.0, 254.0, 254),cv::Scalar(239.88888888888889, 211.66666666666669, 127), cv::Scalar(225.77777777777777, 169.33333333333334, 0), cv::Scalar(211.66666666666669, 127.0, 254), cv::Scalar(197.55555555555557, 84.66666666666667, 127), cv::Scalar(183.44444444444443, 42.33333333333332, 0), cv::Scalar(169.33333333333334, 0.0, 254), cv::Scalar(155.22222222222223, -42.33333333333335, 127), cv::Scalar(141.11111111111111, 
-84.66666666666664, 0), cv::Scalar(127.0, 254.0, 254), cv::Scalar(112.88888888888889, 211.66666666666669, 127), cv::Scalar(98.77777777777777, 169.33333333333334, 0), cv::Scalar(84.66666666666667, 127.0, 254), cv::Scalar(70.55555555555556, 84.66666666666667, 127), cv::Scalar(56.44444444444444, 42.33333333333332, 0), cv::Scalar(42.33333333333332, 0.0, 254), cv::Scalar(28.222222222222236, -42.33333333333335, 127), cv::Scalar(14.111111111111118, -84.66666666666664, 0), cv::Scalar(0.0, 254.0, 254), cv::Scalar(-14.111111111111118, 211.66666666666669, 127) }; string classes[20] = { "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse" , "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor" }; //anchors = [1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52] float h_anchors[10] = { 1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52 }; float* d_anchors; cudaCheck(cudaMalloc(&d_anchors, 10 * sizeof(float))); float* d_boxes_dims; cudaCheck(cudaMalloc(&d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float))); float* d_predictions; cudaCheck(cudaMalloc(&d_predictions, 13 * 13 * 5 * sizeof(float))); float* d_classes; cudaCheck(cudaMalloc(&d_classes, 13 * 13 * 5 * 20 * sizeof(float))); cudaCheck(cudaMemcpy(d_anchors, h_anchors, 10 * sizeof(float), cudaMemcpyHostToDevice)); bool* d_boxes; cudaCheck(cudaMalloc(&d_boxes, 13 * 13 * 5 * sizeof(bool))); float* d_maxScorePerBox; cudaCheck(cudaMalloc(&d_maxScorePerBox, 13 * 13 * 5 * sizeof(float))); int* d_maxScoreIndex; cudaCheck(cudaMalloc(&d_maxScoreIndex, 13 * 13 * 5 * sizeof(int))); float* d_points; cudaCheck(cudaMalloc(&d_points, 13 * 13 * 5 * 4 * sizeof(float))); bool h_boxes[13 * 13 * 5]; float* h_points = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float)); float h_maxScorePerBox[13 * 13 * 5]; int h_maxScoreIndex[13 * 13 * 5]; float* h_boxes_dims = (float*)malloc(13 * 13 * 5 * 4 * sizeof(float)); cudaCheck(cudaMemcpy(d_b9, b9, 125 * sizeof(float), cudaMemcpyHostToDevice)); //workspases void* workSpace[9] = { nullptr }; //(16X3X3X3) cudnnFilterDescriptor_t w1Des; cudnnCheck(cudnnCreateFilterDescriptor(&w1Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w1Des, CUDNN_DATA_FLOAT, 16, 3, 3, 3)); cudnnTensorDescriptor_t conv1OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv1OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv1OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 16, 416, 416)); //cout << "output format NHWC \n"; cudnnConvolutionDescriptor_t conv1Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv1Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv1Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv1Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, inputDes, w1Des, conv1Des, conv1OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv1Algo)); cudnnTensorDescriptor_t max1OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max1OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max1OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 16, 208, 208)); //cout << "max1 out NHWC\n"; cudnnPoolingDescriptor_t max1Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max1Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max1Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w2Des; cudnnCheck(cudnnCreateFilterDescriptor(&w2Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w2Des, CUDNN_DATA_FLOAT, 32, 16, 3, 3)); //(208, 208, 32) cudnnTensorDescriptor_t conv2OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv2OutDes)); 
cudnnCheck(cudnnSetTensor4dDescriptor(conv2OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 32, 208, 208)); //cout << "conv2 out NHWC\n"; cudnnConvolutionDescriptor_t conv2Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv2Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv2Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv2Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max1OutDes, w2Des, conv2Des, conv2OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv2Algo)); float* d_max2Out; cudaMalloc(&d_max2Out, 104 * 104 * 32 * sizeof(float)); totalSpace += 104 * 104 * 32 * sizeof(float); cudnnTensorDescriptor_t max2OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max2OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max2OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 32, 104, 104)); cudnnPoolingDescriptor_t max2Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max2Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max2Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); //[3,3,32,64] cudnnFilterDescriptor_t w3Des; cudnnCheck(cudnnCreateFilterDescriptor(&w3Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w3Des, CUDNN_DATA_FLOAT, 64, 32, 3, 3)); //(104, 104, 64) cudnnTensorDescriptor_t conv3OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv3OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv3OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 104, 104)); float* d_conv3Out; cudaMalloc(&d_conv3Out, 64 * 104 * 104 * sizeof(float)); totalSpace += 64 * 104 * 104 * sizeof(float); cudnnConvolutionDescriptor_t conv3Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv3Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv3Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv3Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max2OutDes, w3Des, conv3Des, conv3OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv3Algo)); cudnnTensorDescriptor_t max3OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max3OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max3OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 64, 52, 52)); cudnnPoolingDescriptor_t max3Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max3Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max3Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w4Des; cudnnCheck(cudnnCreateFilterDescriptor(&w4Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w4Des, CUDNN_DATA_FLOAT, 128, 64, 3, 3)); //(52, 52, 128) cudnnTensorDescriptor_t conv4OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv4OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv4OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 128, 52, 52)); cudnnConvolutionDescriptor_t conv4Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv4Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv4Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv4Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max3OutDes, w4Des, conv4Des, conv4OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv4Algo)); cudnnTensorDescriptor_t max4OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max4OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max4OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 128, 26, 26)); cudnnPoolingDescriptor_t max4Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max4Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max4Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); //[3,3,128,256] cudnnFilterDescriptor_t w5Des; cudnnCheck(cudnnCreateFilterDescriptor(&w5Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w5Des, CUDNN_DATA_FLOAT, 
256, 128, 3, 3)); //(26, 26, 256) cudnnTensorDescriptor_t conv5OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv5OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv5OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 26, 26)); cudnnConvolutionDescriptor_t conv5Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv5Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv5Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv5Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max4OutDes, w5Des, conv5Des, conv5OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv5Algo)); cudnnTensorDescriptor_t max5OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max5OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max5OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 256, 13, 13)); cudnnPoolingDescriptor_t max5Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max5Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max5Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 2, 2)); cudnnFilterDescriptor_t w6Des; cudnnCheck(cudnnCreateFilterDescriptor(&w6Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w6Des, CUDNN_DATA_FLOAT, 512, 256, 3, 3)); //(13, 13, 512) cudnnTensorDescriptor_t conv6OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv6OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv6OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 512, 13, 13)); cudnnConvolutionDescriptor_t conv6Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv6Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv6Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv6Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max5OutDes, w6Des, conv6Des, conv6OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv6Algo)); cudnnTensorDescriptor_t max6OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&max6OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(max6OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 512, 13, 13)); cudnnPoolingDescriptor_t max6Des; cudnnCheck(cudnnCreatePoolingDescriptor(&max6Des)); cudnnCheck(cudnnSetPooling2dDescriptor(max6Des, CUDNN_POOLING_MAX, 2, 2, 0, 0, 1, 1)); cudnnFilterDescriptor_t w7Des; cudnnCheck(cudnnCreateFilterDescriptor(&w7Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w7Des, CUDNN_DATA_FLOAT, 1024, 512, 3, 3)); //(13 x 13 x 1024) cudnnTensorDescriptor_t conv7OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv7OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv7OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 13, 13)); cudnnConvolutionDescriptor_t conv7Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv7Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv7Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv7Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, max6OutDes, w7Des, conv7Des, conv7OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv7Algo)); cudnnFilterDescriptor_t w8Des; cudnnCheck(cudnnCreateFilterDescriptor(&w8Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w8Des, CUDNN_DATA_FLOAT, 1024, 1024, 3, 3)); //(13 x 13 x 1024) cudnnTensorDescriptor_t conv8OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv8OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv8OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 1024, 13, 13)); cudnnConvolutionDescriptor_t conv8Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv8Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv8Des, 1, 1, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv8Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, conv7OutDes, w8Des, 
conv8Des, conv8OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv8Algo)); //[1,1,1024,125] cudnnFilterDescriptor_t w9Des; cudnnCheck(cudnnCreateFilterDescriptor(&w9Des)); cudnnCheck(cudnnSetFilter4dDescriptor(w9Des, CUDNN_DATA_FLOAT, 125, 1024, 1, 1)); //copy weights to GPU cudaCheck(cudaMemcpy(d_w9, w9, 1024 * 125 * sizeof(float), cudaMemcpyHostToDevice)); //(13 x 13 x 125) cudnnTensorDescriptor_t conv9OutDes; cudnnCheck(cudnnCreateTensorDescriptor(&conv9OutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(conv9OutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 1, 125, 13, 13)); cudnnConvolutionDescriptor_t conv9Des; cudnnCheck(cudnnCreateConvolutionDescriptor(&conv9Des)); cudnnCheck(cudnnSetConvolution2dDescriptor(conv9Des, 0, 0, 1, 1, 1, 1, CUDNN_CROSS_CORRELATION)); cudnnConvolutionFwdAlgo_t conv9Algo; cudnnCheck(cudnnGetConvolutionForwardAlgorithm(cudnn, conv8OutDes, w9Des, conv9Des, conv9OutDes, CUDNN_CONVOLUTION_FWD_PREFER_FASTEST, 0, &conv9Algo)); cudnnTensorDescriptor_t softmaxInputDes; cudnnCheck(cudnnCreateTensorDescriptor(&softmaxInputDes)); cudnnCheck(cudnnSetTensor4dDescriptor(softmaxInputDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 5, 20, 13, 13)); cudnnTensorDescriptor_t softmaxOutDes; cudnnCheck(cudnnCreateTensorDescriptor(&softmaxOutDes)); cudnnCheck(cudnnSetTensor4dDescriptor(softmaxOutDes, CUDNN_TENSOR_NHWC, CUDNN_DATA_FLOAT, 5, 20, 13, 13)); int numBlocks[8] = { ceil(sqrt((416 * 416 * 16) / shift)) ,ceil(sqrt((208 * 208 * 32) / shift)) , ceil(sqrt((104 * 104 * 64) / shift)) , ceil(sqrt((52 * 52 * 128) / shift)) , ceil(sqrt((26 * 26 * 256) / shift)) , ceil(sqrt((13 * 13 * 512) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) ,ceil(sqrt((13 * 13 * 1024) / shift)) }; //-------------------------------------------------------START------------------------------------------ char* imagePaths[8] = { "dog.jpg","person.jpg","plane.jpg","motor.jpg","tv.jpg","horse.jpg" , "bus.jpg","bottle.jpg"}; cv::Mat image[8]; for (int i = 0; i < 8; i++) { image[i] = load_image(imagePaths[i]); } float* h_image = (float*)malloc(416 * 416 * 3 * sizeof(float)); for (int i = 0; i < 8; i++) { long t1 = clock(); cudaMemcpy(d_input, image[i].ptr<float>(0), imageH*imageW * 3 * sizeof(float), cudaMemcpyHostToDevice); std::cout << imagePaths[i] << "\n"; //--------------------------------------------------------conv1---------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, inputDes, w1Des, conv1Des, conv1OutDes, conv1Algo, &space)); if (i == 0) { cudaCheck(cudaMalloc(&(workSpace[0]), space)); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, inputDes, d_input, w1Des, d_w1, conv1Des, conv1Algo, workSpace[0], space, beta, conv1OutDes, d_conv1Out)); add_biase << <dim3(416, 416), 1 >> >(d_conv1Out, d_b1, 416 * 416 * 16, 416 * 416); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 1------------------------------------------------------------------ //leaky_relu << <dim3(1665, 1665), 1 >> > (d_conv1Out, .1, 416 * 416 * 16); //int x = ceil(sqrt((416 * 416 * 16) / ( threadsPerBlock))); //std::cout << "x = " << x << "\n"; //leaky_relu_v2 << < dim3(x, x), threadsPerBlock >> > (d_conv1Out, .1, 416 * 416 * 16); //leaky_relu_v3 << <dim3(416,416),1 >> > (d_conv1Out, .1, 416 * 416 * 16, 416 * 416); leaky_relu_v4 << <dim3(numBlocks[0], numBlocks[0]), 1 >> > (d_conv1Out, .1, 416 * 416 * 16, shift); 
//----------------------------------------------------max 1---------------------------------------------------------------- // MaxPooling 2×2 2 (208, 208, 16) cudnnCheck(cudnnPoolingForward(cudnn, max1Des, alpha, conv1OutDes, d_conv1Out, beta, max1OutDes, d_max1Out)); //--------------------------------------------------------conv2------------------------------------------------------------------- //[3,3,16,32] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max1OutDes, w2Des, conv2Des, conv2OutDes, conv2Algo, &space)); if (i == 0) { cudaCheck(cudaMalloc(&workSpace[1], space)); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max1OutDes, d_max1Out, w2Des, d_w2, conv2Des, conv2Algo, workSpace[1], space, beta, conv2OutDes, d_conv2Out)); add_biase << <dim3(208, 208), 1 >> >(d_conv2Out, d_b2, 208 * 208 * 32, 208 * 208); // to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 2------------------------------------------------------------------ //(208, 208, 32) //leaky_relu << <dim3(1180, 1180), 1 >> > (d_conv2Out, .1, 208 * 208 * 32); //leaky_relu_v3 << <dim3(208,208),1 >> > (d_conv2Out, .1, 208 * 208 * 32, 208 * 208); leaky_relu_v4 << <dim3(numBlocks[1], numBlocks[1]), 1 >> > (d_conv2Out, .1, 208 * 208 * 32, shift); //----------------------------------------------------max 2---------------------------------------------------------------- //MaxPooling 2×2 2 (104, 104, 32) cudnnCheck(cudnnPoolingForward(cudnn, max2Des, alpha, conv2OutDes, d_conv2Out, beta, max2OutDes, d_max2Out)); //--------------------------------------------------------conv3------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max2OutDes, w3Des, conv3Des, conv3OutDes, conv3Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[2], space); totalSpace += space; } long m = clock(); cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max2OutDes, d_max2Out, w3Des, d_w3, conv3Des, conv3Algo, workSpace[2], space, beta, conv3OutDes, d_conv3Out)); cout << "time for conv 3 " << clock() - m << "\n"; //don't forget to add the biases add_biase << <dim3(104, 104), 1 >> >(d_conv3Out, d_b3, 104 * 104 * 64, 104 * 104); //-----------------------------------------------------relu 3------------------------------------------------------------------ ////(104, 104, 64) //leaky_relu << <dim3(835, 835), 1 >> > (d_conv3Out, .1, 104 * 104 * 64); //leaky_relu_v3 << <dim3(104, 104), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, 104 * 104); leaky_relu_v4 << <dim3(numBlocks[2], numBlocks[2]), 1 >> > (d_conv3Out, .1, 104 * 104 * 64, shift); //----------------------------------------------------max 3---------------------------------------------------------------- //MaxPooling 2×2 2 (52, 52, 64) cudnnCheck(cudnnPoolingForward(cudnn, max3Des, alpha, conv3OutDes, d_conv3Out, beta, max3OutDes, d_max3Out)); //--------------------------------------------------------conv4------------------------------------------------------------------- //[3,3,64,128] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max3OutDes, w4Des, conv4Des, conv4OutDes, conv4Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[3], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max3OutDes, d_max3Out, w4Des, d_w4, conv4Des, conv4Algo, workSpace[3], space, beta, conv4OutDes, d_conv4Out)); //don't forget to add the biases //cout << "time for conv 2 " << clock() 
- m << "\n"; add_biase << <dim3(52, 52), 1 >> >(d_conv4Out, d_b4, 52 * 52 * 128, 52 * 52); //test(d_conv4Out, 0, 16); //test(d_conv4Out, 128, 128 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_conv4Out, 52 * 128, 52 * 128 + 16); //test(d_conv4Out, 52 * 128 + 128, 52 * 128 + 128 + 16); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 4------------------------------------------------------------------ ////(52, 52, 128) //leaky_relu << <dim3(600, 600), 1 >> > (d_conv4Out, .1, 52 * 52 * 128); //leaky_relu_v3 << <dim3(52, 52), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, 52 * 52); leaky_relu_v4 << <dim3(numBlocks[3], numBlocks[3]), 1 >> > (d_conv4Out, .1, 52 * 52 * 128, shift); //----------------------------------------------------max 4---------------------------------------------------------------- //MaxPooling 2×2 2 (26, 26, 128) cudnnCheck(cudnnPoolingForward(cudnn, max4Des, alpha, conv4OutDes, d_conv4Out, beta, max4OutDes, d_max4Out)); //test(d_max4Out, 0, 16); //test(d_max4Out, 128, 128 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_max4Out, 26 * 128, 26 * 128 + 16); //test(d_max4Out, 26 * 128 + 128, 26 * 128 + 128 + 16); //--------------------------------------------------------conv5------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max4OutDes, w5Des, conv5Des, conv5OutDes, conv5Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[4], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max4OutDes, d_max4Out, w5Des, d_w5, conv5Des, conv5Algo, workSpace[4], space, beta, conv5OutDes, d_conv5Out)); //don't forget to add the biases add_biase << <dim3(28, 28), 1 >> >(d_conv5Out, d_b5, 26 * 26 * 256, 26 * 26); //test(d_conv5Out, 0, 16); //test(d_conv5Out, 256, 256 + 16); ////test(d_conv2Out, 32+16, 32 + 32); ////test(d_conv1Out, 32 + 16, 32 + 32); //test(d_conv5Out, 26 * 256, 26 * 256 + 16); //test(d_conv5Out, 26 * 256 + 256, 26 * 256 + 256 + 16); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 5------------------------------------------------------------------ ////(26, 26, 256) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv5Out, .1, 26 * 26 * 256); //leaky_relu_v3 << <dim3(26, 26), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, 26 * 26); leaky_relu_v4 << <dim3(numBlocks[4], numBlocks[4]), 1 >> > (d_conv5Out, .1, 26 * 26 * 256, shift); //----------------------------------------------------max 5---------------------------------------------------------------- //MaxPooling 2×2 2 (13, 13, 256) cudnnCheck(cudnnPoolingForward(cudnn, max5Des, alpha, conv5OutDes, d_conv5Out, beta, max5OutDes, d_max5Out)); //--------------------------------------------------------conv6------------------------------------------------------------------- //[3,3,256,512] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max5OutDes, w6Des, conv6Des, conv6OutDes, conv6Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[5], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max5OutDes, d_max5Out, w6Des, d_w6, conv6Des, conv6Algo, workSpace[5], space, beta, conv6OutDes, d_conv6Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv6Out, d_b6, 13 * 13 * 512, 13 
* 13); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 6------------------------------------------------------------------ ////(13, 13, 512) //leaky_relu << <dim3(300, 300), 1 >> > (d_conv6Out, .1, 13 * 13 * 512); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, 13 * 13); leaky_relu_v4 << <dim3(numBlocks[5], numBlocks[5]), 1 >> > (d_conv6Out, .1, 13 * 13 * 512, shift); //----------------------------------------------------max 6---------------------------------------------------------------- //MaxPooling 2×2 1 (13, 13, 512) cudnnCheck(cudnnPoolingForward(cudnn, max6Des, alpha, conv6OutDes, d_conv6Out, beta, max6OutDes, d_max6Out)); //--------------------------------------------------------conv7------------------------------------------------------------------- //[3,3,512,1024] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, max6OutDes, w7Des, conv7Des, conv7OutDes, conv7Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[6], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, max6OutDes, d_max6Out, w7Des, d_w7, conv7Des, conv7Algo, workSpace[6], space, beta, conv7OutDes, d_conv7Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv7Out, d_b7, 13 * 13 * 1024, 13 * 13); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 7------------------------------------------------------------------ ////(13 x 13 x 1024) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, 13 * 13); leaky_relu_v4 << <dim3(numBlocks[6], numBlocks[6]), 1 >> > (d_conv7Out, .1, 13 * 13 * 1024, shift); //--------------------------------------------------------conv8------------------------------------------------------------------- //[3,3,1024,1024] cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, conv7OutDes, w8Des, conv8Des, conv8OutDes, conv8Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[7], space); totalSpace += space; } //cout << "total space " << totalSpace/(1024*1024) << " MB\n"; cudnnCheck(cudnnConvolutionForward(cudnn, alpha, conv7OutDes, d_conv7Out, w8Des, d_w8, conv8Des, conv8Algo, workSpace[7], space, beta, conv8OutDes, d_conv8Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv8Out, d_b8, 13 * 13 * 1024, 13 * 13); //to be space effecient free workspace but make sure it doesn't include any data related to convolution //-----------------------------------------------------relu 8------------------------------------------------------------------ ////(13 x 13 x 1024) //leaky_relu << <dim3(420, 420), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024); //leaky_relu_v3 << <dim3(13, 13), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, 13 * 13); //x = ceil(sqrt((13 * 13 * 1024) / shift)); leaky_relu_v4 << <dim3(numBlocks[7], numBlocks[7]), 1 >> > (d_conv8Out, .1, 13 * 13 * 1024, shift); //--------------------------------------------------------conv9------------------------------------------------------------------- cudnnCheck(cudnnGetConvolutionForwardWorkspaceSize(cudnn, conv8OutDes, w9Des, conv9Des, conv9OutDes, conv9Algo, &space)); if (i == 0) { cudaMalloc(&workSpace[8], space); totalSpace += space; } cudnnCheck(cudnnConvolutionForward(cudnn, alpha, conv8OutDes, d_conv8Out, w9Des, d_w9, 
conv9Des, conv9Algo, workSpace[8], space, beta, conv9OutDes, d_conv9Out)); //don't forget to add the biases add_biase << <dim3(13, 13), 1 >> > (d_conv9Out, d_b9, 13 * 13 * 125, 13 * 13); //another optimization separate first then sigmoid exp use the predefined ones sigmoid_exp << <dim3(150, 150), 1 >> > (d_conv9Out, d_anchors, 13 * 13 * 125); separate_data << <dim3(150, 150), 1 >> > (d_conv9Out, d_boxes_dims, d_predictions, d_classes, 13 * 13 * 125); cudnnCheck(cudnnSoftmaxForward(cudnn, CUDNN_SOFTMAX_FAST, CUDNN_SOFTMAX_MODE_CHANNEL, alpha, softmaxInputDes, d_classes, beta, softmaxOutDes, d_classes_softmax)); scores << <dim3(32, 32), 1 >> > (d_classes_softmax, d_predictions, 13 * 13 * 5); bool_arr << <dim3(30, 30), 1 >> >(d_boxes, 13 * 13 * 5, false); filter << < dim3(150, 150), 1 >> > (d_classes_softmax, d_boxes, 0.3, 13 * 13 * 5 * 20); get_max_scores << <dim3(30, 30), 1 >> > (d_classes_softmax, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 13 * 13 * 5); calculate_points << <dim3(30, 30), 1 >> > (d_boxes_dims, d_points, d_boxes, 13 * 13 * 5); //cudaDeviceSynchronize(); non_max_supression << < dim3(30, 30), 1 >> > (d_points, d_boxes, d_maxScorePerBox, d_maxScoreIndex, 0.3, 13 * 13 * 5); cudaCheck(cudaMemcpy(h_boxes, d_boxes, 13 * 13 * 5 * sizeof(bool), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(h_maxScorePerBox, d_maxScorePerBox, 13 * 13 * 5 * sizeof(float), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(h_maxScoreIndex, d_maxScoreIndex, 13 * 13 * 5 * sizeof(int), cudaMemcpyDeviceToHost)); //cudaCheck(cudaMemcpy(h_boxes_dims, d_boxes_dims, 13 * 13 * 5 * 4 * sizeof(float), cudaMemcpyDeviceToHost)); cudaCheck(cudaMemcpy(h_points, d_points, 13 * 13 * 5 * 4 * sizeof(float), cudaMemcpyDeviceToHost)); cv::Mat output(416, 416, CV_8UC3); cv::normalize(image[i], output, 0.0, 255.0, cv::NORM_MINMAX); for (int i = 0; i < 13 * 13 * 5; i++) { if (h_boxes[i]) { int index = i * 4; int left = h_points[index]; int top = h_points[index + 1]; int right = h_points[index + 2]; int bottom = h_points[index + 3]; float confidence = h_maxScorePerBox[i]; string className = classes[h_maxScoreIndex[i]]; std::cout << "( " << left << " , " << top << " ) , (" << right << " , " << bottom << " ) class " << className << " with prop " << confidence << "\n"; //threashold boxes left = (left <= 416) ? left : 416; top = (top <= 416) ? top : 416; right = (right <= 416) ? right : 416; bottom = (bottom <= 416) ? bottom : 416; cv::rectangle(output, cv::Point(left, top), cv::Point(right, bottom), colors[h_maxScoreIndex[i]], 3); //draw << <dim3(416, 416), 1 >> > (d_input, left, top, right, bottom, colors[h_maxScoreIndex[i]].val[0], //colors[h_maxScoreIndex[i]].val[1], colors[h_maxScoreIndex[i]].val[2], 416, 416); } } cudaCheck(cudaMemcpy(h_image, d_input, 416 * 416 * 3 * sizeof(float), cudaMemcpyDeviceToHost)); //cv::Mat output0(416, 416, CV_32FC3,h_image); //cv::normalize(output0, output, 0.0, 255.0, cv::NORM_MINMAX); //cv::cvtColor(output, output, CV_RGB2BGR); //cv::normalize(output, output, 0.0, 255.0, cv::NORM_MINMAX); long t2 = clock(); cout << "time = " << t2 - t1 << "\n"; string num = std::to_string(i); string file = "output" + num + ".png"; save_image(file.c_str(), output); } //to be space effecient free workspace but make sure it doesn't include any data related to convolution cout << "total space " << totalSpace / (1024 * 1024) << "MB\n"; }
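The sigmoid_exp kernel in both copies of this file decodes the raw 13 x 13 x 125 network output in place: for each of the 5 anchor boxes per cell, tx and ty pass through a sigmoid and are offset by the cell column and row, tw and th are exponentiated and scaled by the anchor dimensions, and the objectness score gets a sigmoid, all scaled by the 32-pixel cell stride. Below is a minimal host-side sketch of the same per-box arithmetic, assuming the anchor table and the 32.0 stride that appear in the file; the struct and function names are illustrative and not part of the original code.

#include <math.h>

// Hedged host-side sketch of the per-box decoding performed by sigmoid_exp.
// t[0..4] are the raw (tx, ty, tw, th, to) values for one box; col/row index
// the 13 x 13 grid; box selects one of the 5 anchors.
struct DecodedBox { float cx, cy, w, h, conf; };

static DecodedBox decode_box(const float t[5], int col, int row, int box,
                             const float anchors[10])
{
    DecodedBox d;
    d.cx   = (col + 1.0f / (1.0f + expf(-t[0]))) * 32.0f;   // box center x in pixels
    d.cy   = (row + 1.0f / (1.0f + expf(-t[1]))) * 32.0f;   // box center y in pixels
    d.w    = expf(t[2]) * anchors[2 * box]     * 32.0f;     // box width in pixels
    d.h    = expf(t[3]) * anchors[2 * box + 1] * 32.0f;     // box height in pixels
    d.conf = 1.0f / (1.0f + expf(-t[4]));                   // objectness confidence
    return d;
}

The corner points consumed by non_max_supression then follow as (cx - w/2, cy - h/2) and (cx + w/2, cy + h/2), matching what calculate_points computes on the device.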
e02fc5017fcc686c031dc6838a11a65325d2daae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This code should run for any matrix size. This kernel outperforms cuda-2.2 when m, n, k >= 512 @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_N_64_16_16_16_4( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { __shared__ double Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int ibx = blockIdx.x * 64; int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; /* Taking care of invalid memory access in dimension M */ if ( ibx+idt >= m ) A += ibx+0; else A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); B += tx+__mul24(iby, ldb); /* These variables guide the threads to avoid invalid memory accesses in dimension N. Simply it's the stopping criterion. or you can say that access index wraps around to a valid memory location. */ int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb; if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; } if ( s1 == 0 ) B += __mul24(ty, ldb); else s1=0; const double *Bend = B + k - k % 16; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 15 ) { do { double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); } /* Common sub expression elimination. 
*/ ibx = ibx + idt - m; /* remembering k dimension */ ldb = m = k; /* k changed to support the generic case and reuse valuable registers */ k = k % 16; m -= k; /* Here we are taking care of k % dim_k portions */ if ( k != 0 ) { /* Avoid Invalid Memory access in dimension K If some thread enters this if ( ) block first access to B should be valid as K isn't divisible by blk_K Note that dimension N has been taken care of by s1, s2, s3, s4 But depending upon K and thread index tx, some memory access may be still invalid, so take care of them now by setting s1, s2, s3, s4 = 0 B might have been advanced in the previous loop, take care of that, this is about right bottom corner. */ if ( m + tx >= ldb ) { s1 = s2 = s3 = s4 = 0; B -= tx; } Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); for(int i=0; i < k; i++) { daxpy( A[0], &Bb[i+0][0], Cb ); A += lda; } } /* Now taking care of dimension M, N that doesnt fit into blocks */ if ( (iby+16) >= n ) { lda = n - iby; } else { lda = 16; } if ( ibx >= 0 ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha 
* Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = 
alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } extern "C" void magmablas_dgemm_N_N_64_16_16_16_4( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 ); hipLaunchKernelGGL(( dgemm_kernel_N_N_64_16_16_16_4), dim3(grid), dim3(threads), 0, magma_stream , C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
e02fc5017fcc686c031dc6838a11a65325d2daae.cu
/* -- MAGMA (version 1.6.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date November 2014 @precisions normal d -> s */ #include "common_magma.h" #include "commonblas_d.h" /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double* __restrict__ b, double* __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } /** Purpose: -------- This routine computes C = alpha * A*B + beta * C B is put into shared memory Parameters Used: blk_M=64 blk_N=16 blk_K=16 nthd_x=16 nthd_y=4 This code should run for any matrix size. This kernel outperforms cuda-2.2 when m, n, k >= 512 @ingroup magma_dblas3 ********************************************************************/ __global__ void dgemm_kernel_N_N_64_16_16_16_4( double* __restrict__ C, const double* __restrict__ A, const double* __restrict__ B, int m, int n, int k, int lda, int ldb, int ldc, double alpha, double beta ) { __shared__ double Bb[16][17]; const int tx = threadIdx.x; const int ty = threadIdx.y; int ibx = blockIdx.x * 64; int iby = blockIdx.y * 16; const int idt = ty * 16 + tx; /* Taking care of invalid memory access in dimension M */ if ( ibx+idt >= m ) A += ibx+0; else A += ibx + idt; C += ibx + idt + __mul24(iby, ldc); B += tx+__mul24(iby, ldb); /* These variables guide the threads to avoid invalid memory accesses in dimension N. Simply it's the stopping criterion. or you can say that access index wraps around to a valid memory location. */ int s1=0, s2=4*ldb, s3=8*ldb, s4=12*ldb; if ( iby+ty >= n ) { s1=1; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+4 >= n ) { s1=0; s2=0*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+8 >= n ) { s1=0; s2=4*ldb; s3=0*ldb; s4=0*ldb; } else if ( iby+ty+12 >= n ) { s1=0; s2=4*ldb; s3=8*ldb; s4=0*ldb; } if ( s1 == 0 ) B += __mul24(ty, ldb); else s1=0; const double *Bend = B + k - k % 16; double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; if ( k > 15 ) { do { double Ab[4] = {A[0], A[lda], A[2*lda], A[3*lda]}; Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); A += 4 * lda; daxpy( Ab[0], &Bb[0][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[1][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[2][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[3][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[4][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[5][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[6][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[7][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[8][0], Cb ); Ab[0] = A[0*lda]; daxpy( Ab[1], &Bb[9][0], Cb ); Ab[1] = A[1*lda]; daxpy( Ab[2], &Bb[10][0], Cb ); Ab[2] = A[2*lda]; daxpy( Ab[3], &Bb[11][0], Cb ); Ab[3] = A[3*lda]; A += 4 * lda; daxpy( Ab[0], &Bb[12][0], Cb ); daxpy( Ab[1], &Bb[13][0], Cb ); daxpy( Ab[2], &Bb[14][0], Cb ); daxpy( Ab[3], &Bb[15][0], Cb ); B += 16; __syncthreads(); } while (B < Bend); } /* Common sub expression elimination. 
*/ ibx = ibx + idt - m; /* remembering k dimension */ ldb = m = k; /* k changed to support the generic case and reuse valuable registers */ k = k % 16; m -= k; /* Here we are taking care of k % dim_k portions */ if ( k != 0 ) { /* Avoid Invalid Memory access in dimension K If some thread enters this if ( ) block first access to B should be valid as K isn't divisible by blk_K Note that dimension N has been taken care of by s1, s2, s3, s4 But depending upon K and thread index tx, some memory access may be still invalid, so take care of them now by setting s1, s2, s3, s4 = 0 B might have been advanced in the previous loop, take care of that, this is about right bottom corner. */ if ( m + tx >= ldb ) { s1 = s2 = s3 = s4 = 0; B -= tx; } Bb[tx][ty+0 ] = B[s1]; Bb[tx][ty+4 ] = B[s2]; Bb[tx][ty+8 ] = B[s3]; Bb[tx][ty+12] = B[s4]; __syncthreads(); for(int i=0; i < k; i++) { daxpy( A[0], &Bb[i+0][0], Cb ); A += lda; } } /* Now taking care of dimension M, N that doesnt fit into blocks */ if ( (iby+16) >= n ) { lda = n - iby; } else { lda = 16; } if ( ibx >= 0 ) lda = 0; else lda = lda; switch(lda) { case 16: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc]; break; case 15: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc]; break; case 14: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc]; break; case 13: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha 
* Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc]; break; case 12: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc]; break; case 11: C[ 0 ] = alpha * Cb[ 0] + beta * C[ 0 ]; C[ 1*ldc] = alpha * Cb[ 1] + beta * C[ 1*ldc]; C[ 2*ldc] = alpha * Cb[ 2] + beta * C[ 2*ldc]; C[ 3*ldc] = alpha * Cb[ 3] + beta * C[ 3*ldc]; C[ 4*ldc] = alpha * Cb[ 4] + beta * C[ 4*ldc]; C[ 5*ldc] = alpha * Cb[ 5] + beta * C[ 5*ldc]; C[ 6*ldc] = alpha * Cb[ 6] + beta * C[ 6*ldc]; C[ 7*ldc] = alpha * Cb[ 7] + beta * C[ 7*ldc]; C[ 8*ldc] = alpha * Cb[ 8] + beta * C[ 8*ldc]; C[ 9*ldc] = alpha * Cb[ 9] + beta * C[ 9*ldc]; C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc]; break; case 10: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc]; break; case 9: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc]; break; case 8: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc]; break; case 7: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc]; break; case 6: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc]; break; case 5: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc]; break; case 4: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = 
alpha * Cb[2] + beta * C[2*ldc]; C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc]; break; case 3: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc]; break; case 2: C[0 ] = alpha * Cb[0] + beta * C[0 ]; C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc]; break; case 1: C[0 ] = alpha * Cb[0] + beta * C[0 ]; break; case 0: break; } } extern "C" void magmablas_dgemm_N_N_64_16_16_16_4( double *C, const double *A, const double *B, magma_int_t m, magma_int_t n, magma_int_t k, magma_int_t lda, magma_int_t ldb, magma_int_t ldc, double alpha, double beta ) { dim3 threads( 16, 4 ); dim3 grid( (m - 1)/64 + 1, (n - 1)/16 + 1 ); dgemm_kernel_N_N_64_16_16_16_4<<< grid, threads, 0, magma_stream >>> ( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta ); }
887e4fbf53dfda9b3b4964ed335619dcc0ca70ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <fstream> #include <string> #include <string.h> #include <utility> #include <ctime> #include <time.h> using namespace std; // output ofstream fo("Ans.txt"); // Cac bien hang so const int ARRAY_SIZE_INP = 12005; const int ARRAY_BYTES_INP = ARRAY_SIZE_INP * sizeof(int); const int ARRAY_SIZE_OUT = 605; const int ARRAY_BYTES_OUT = ARRAY_SIZE_OUT * sizeof(int); //cac bien chinh int l = 9, d = 2; char cDataInp[ARRAY_SIZE_INP]; int h_dataMotif[ARRAY_SIZE_INP]; string sDataInp[20]; struct Motif_Ans { int dis; string motif; int adress[20]; }; //input tu file void File_Input() { l = 9; d = 2; FILE * pFile; pFile = fopen("datacu.txt", "r"); if (pFile == NULL) perror("Error opening file"); else { if (fgets(cDataInp, ARRAY_SIZE_INP, pFile) != NULL) cout << "nhap du lieu thanh cong!\n"; fclose(pFile); } for (int i = 0; i < strlen(cDataInp); ++i) { //A=0 C=1 G=2 T=3 switch (cDataInp[i]) { case 'A': { h_dataMotif[i] = 0; break; } case 'C': { h_dataMotif[i] = 1; break; } case 'G': { h_dataMotif[i] = 2; break; } case 'T': { h_dataMotif[i] = 3; break; } default: cout << "error chuyen sang int"; break; } } int k = 0; string temp = cDataInp; //cout << temp << endl; for (int i = 0; i < temp.size(); i += 600) { sDataInp[k] = temp.substr(i, 600); //cout << k << ". " << sDataInp[k] << endl; k++; } } int score_ham(string s1, string s2) { int res = 0; for (int i = 0; i<s1.size(); ++i) if (s1[i] != s2[i]) res++; return res; } Motif_Ans dis_hamming(string s) { Motif_Ans res; res.motif = s; int res_Sum = 0, temp_score = 999, temp_Adress; for (int i = 0; i<20; ++i) { string s1 = sDataInp[i]; temp_score = 999; for (int j = 0; j < s1.size() - l + 1; ++j) { string temp_str = s1.substr(j, l); int score_s = score_ham(s, temp_str); if (score_s < temp_score) { temp_score = score_s; temp_Adress = j + 1; } } res_Sum += temp_score; res.adress[i] = temp_Adress; } res.dis = res_Sum; return res; } __global__ void patternBarching(const int* d_datainp, const int l, const int d, int *ans) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < 600 - l) { //printf("\n %d", index); int ansMotif_sorce = 999;// motif tra ra int ansMotif_string[40];//motif tra ra int motif_NeSorce = 999;//kq tra ve ham NE int motif_NeString[40];//kq tra ve ham NE int temp_Sorce = 999; int temp_Str[40]; //cat chuoi motif for (int i = 0; i < l; ++i) { ansMotif_string[i] = d_datainp[i + index]; motif_NeString[i] = ansMotif_string[i]; } //begin tinh hamming int tempRow, tempSubRow; for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (ansMotif_string[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } ansMotif_sorce += tempRow; } //end tinh hamming cho chuoi vao //begin tinh pattern branching for (int a = 0; a <= d; a++) { //kiem tra motif dis if (motif_NeSorce < ansMotif_sorce) { ansMotif_sorce = motif_NeSorce; for (int i = 0; i < l; ++i) { ansMotif_string[i] = motif_NeString[i]; temp_Str[i] = motif_NeString[i]; } } else {//gan bien Ham Ne for (int i = 0; i < l; ++i) { temp_Str[i] = ansMotif_string[i]; } }//end kiem tra motif //begin ham bestNeighbor int change = -1; for (int b = 0; b < l; ++b) { //trg hop 0 A if (temp_Str[b] != 0) { change = temp_Str[b]; temp_Str[b] = 0; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { 
tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change;//tra lai gia tri ban dau } //trg hop 1 C if (temp_Str[b] != 1) { change = temp_Str[b]; temp_Str[b] = 1; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } //trg hop 2 G if (temp_Str[b] != 2) { change = temp_Str[b]; temp_Str[b] = 2; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } //trg hop 3 T if (temp_Str[b] != 3) { change = temp_Str[b]; temp_Str[b] = 3; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } } }//end Ne //end tinh int dem = 0; int res = 0; for (int i = 0; i < l; ++i) { res = res | (ansMotif_string[i] << dem); dem += 2; if (index == 574) printf("%d ", ansMotif_string[i]); } ans[index] = res; } } int main() { File_Input(); //test /*string test = "GTTCGGCGT"; Motif_Ans testMoitf = dis_hamming(test); fo << testMoitf.dis << endl; cout<<sDataInp[0].substr(574, l) << endl; cout << h_dataMotif[574] << endl;*/ //end test int h_dataOut[ARRAY_SIZE_OUT]; for (int i = 0; i < 600; ++i) { h_dataOut[i] = -1; } //GPU khoi tao bien va bo nho int *d_dataMotif; if (hipMalloc(&d_dataMotif, ARRAY_BYTES_INP) != hipSuccess) { cout << "error allocating memory!" << endl; return 0; } int *d_dataOut; if (hipMalloc(&d_dataOut, ARRAY_BYTES_OUT) != hipSuccess) { cout << "error allocating memory!" << endl; hipFree(d_dataMotif); return 0; } if (hipMemcpy(d_dataMotif, h_dataMotif, ARRAY_BYTES_INP, hipMemcpyHostToDevice) != hipSuccess) { cout << "error copying memory!" << endl; hipFree(d_dataMotif); hipFree(d_dataOut); return 0; } if (hipMemcpy(d_dataOut, h_dataOut, ARRAY_BYTES_OUT, hipMemcpyHostToDevice) != hipSuccess) { cout << "error copying memory!" << endl; hipFree(d_dataMotif); hipFree(d_dataOut); return 0; } cout << "dang chay ...." 
<< endl; //khoi tao chay cuda int threadsPerBlock = 256; int blocksPerGrid = (600 + threadsPerBlock - 1) / threadsPerBlock; hipLaunchKernelGGL(( patternBarching) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_dataMotif, l, d, d_dataOut); fo << "\nTime " << clock() / (double)1000 << " Sec" << endl; //copy data tro ve if (hipMemcpy(h_dataOut, d_dataOut, ARRAY_BYTES_OUT, hipMemcpyDeviceToHost) != hipSuccess) { cout << "error copying memory!" << endl; hipFree(d_dataMotif); hipFree(d_dataOut); return 0; } //lay best motif cout << "\n du lieu tra ve" << endl; Motif_Ans best_motif,temp_motif_return; best_motif.dis = 999; for (int i = 0; i < 600; i++) { int chuyenStr = h_dataOut[i]; int k = 0; string res = ""; //cout << chuyenStr << endl; if (chuyenStr != -1) { //chuyen kieu in sang string for (int j = 0; j < l; ++j) { int temp = (chuyenStr >> k) & 3; //cout << temp << ' '; switch (temp) { case 0: { res += 'A'; break; } case 1: { res += 'C'; break; } case 2: { res += 'G'; break; } case 3: { res += 'T'; break; } } k += 2; } //if (i == 574) fo << res << endl; //ket thuc chuyen //kiem tra do dai va tra vi tri temp_motif_return = dis_hamming(res); if (temp_motif_return.dis < best_motif.dis) { cout << "thay doi best" << endl; best_motif.dis = temp_motif_return.dis; best_motif.motif = temp_motif_return.motif; for (int z = 0; z < 20; ++z) { best_motif.adress[z] = temp_motif_return.adress[z]; } } //end kiem tra cout << "------------" << endl; cout << temp_motif_return.motif << endl; cout << temp_motif_return.dis << endl; cout << best_motif.motif << endl; cout << best_motif.dis << endl; cout << "+++++++++++++" << endl; } } fo << "Best motif: " << best_motif.motif << endl << "Motif location: " << endl; for (int z = 0; z < 20; ++z) { fo << best_motif.adress[z] << ' '; } cout << "xong" << endl; fo << "\nEnd Time " << clock() / (double)1000 << " Sec" << endl; hipFree(d_dataMotif); hipFree(d_dataOut); return 0; }
887e4fbf53dfda9b3b4964ed335619dcc0ca70ba.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <fstream> #include <string> #include <string.h> #include <utility> #include <ctime> #include <time.h> using namespace std; // output ofstream fo("Ans.txt"); // Cac bien hang so const int ARRAY_SIZE_INP = 12005; const int ARRAY_BYTES_INP = ARRAY_SIZE_INP * sizeof(int); const int ARRAY_SIZE_OUT = 605; const int ARRAY_BYTES_OUT = ARRAY_SIZE_OUT * sizeof(int); //cac bien chinh int l = 9, d = 2; char cDataInp[ARRAY_SIZE_INP]; int h_dataMotif[ARRAY_SIZE_INP]; string sDataInp[20]; struct Motif_Ans { int dis; string motif; int adress[20]; }; //input tu file void File_Input() { l = 9; d = 2; FILE * pFile; pFile = fopen("datacu.txt", "r"); if (pFile == NULL) perror("Error opening file"); else { if (fgets(cDataInp, ARRAY_SIZE_INP, pFile) != NULL) cout << "nhap du lieu thanh cong!\n"; fclose(pFile); } for (int i = 0; i < strlen(cDataInp); ++i) { //A=0 C=1 G=2 T=3 switch (cDataInp[i]) { case 'A': { h_dataMotif[i] = 0; break; } case 'C': { h_dataMotif[i] = 1; break; } case 'G': { h_dataMotif[i] = 2; break; } case 'T': { h_dataMotif[i] = 3; break; } default: cout << "error chuyen sang int"; break; } } int k = 0; string temp = cDataInp; //cout << temp << endl; for (int i = 0; i < temp.size(); i += 600) { sDataInp[k] = temp.substr(i, 600); //cout << k << ". " << sDataInp[k] << endl; k++; } } int score_ham(string s1, string s2) { int res = 0; for (int i = 0; i<s1.size(); ++i) if (s1[i] != s2[i]) res++; return res; } Motif_Ans dis_hamming(string s) { Motif_Ans res; res.motif = s; int res_Sum = 0, temp_score = 999, temp_Adress; for (int i = 0; i<20; ++i) { string s1 = sDataInp[i]; temp_score = 999; for (int j = 0; j < s1.size() - l + 1; ++j) { string temp_str = s1.substr(j, l); int score_s = score_ham(s, temp_str); if (score_s < temp_score) { temp_score = score_s; temp_Adress = j + 1; } } res_Sum += temp_score; res.adress[i] = temp_Adress; } res.dis = res_Sum; return res; } __global__ void patternBarching(const int* d_datainp, const int l, const int d, int *ans) { int index = blockDim.x * blockIdx.x + threadIdx.x; if (index < 600 - l) { //printf("\n %d", index); int ansMotif_sorce = 999;// motif tra ra int ansMotif_string[40];//motif tra ra int motif_NeSorce = 999;//kq tra ve ham NE int motif_NeString[40];//kq tra ve ham NE int temp_Sorce = 999; int temp_Str[40]; //cat chuoi motif for (int i = 0; i < l; ++i) { ansMotif_string[i] = d_datainp[i + index]; motif_NeString[i] = ansMotif_string[i]; } //begin tinh hamming int tempRow, tempSubRow; for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (ansMotif_string[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } ansMotif_sorce += tempRow; } //end tinh hamming cho chuoi vao //begin tinh pattern branching for (int a = 0; a <= d; a++) { //kiem tra motif dis if (motif_NeSorce < ansMotif_sorce) { ansMotif_sorce = motif_NeSorce; for (int i = 0; i < l; ++i) { ansMotif_string[i] = motif_NeString[i]; temp_Str[i] = motif_NeString[i]; } } else {//gan bien Ham Ne for (int i = 0; i < l; ++i) { temp_Str[i] = ansMotif_string[i]; } }//end kiem tra motif //begin ham bestNeighbor int change = -1; for (int b = 0; b < l; ++b) { //trg hop 0 A if (temp_Str[b] != 0) { change = temp_Str[b]; temp_Str[b] = 0; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) 
{ tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change;//tra lai gia tri ban dau } //trg hop 1 C if (temp_Str[b] != 1) { change = temp_Str[b]; temp_Str[b] = 1; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } //trg hop 2 G if (temp_Str[b] != 2) { change = temp_Str[b]; temp_Str[b] = 2; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } //trg hop 3 T if (temp_Str[b] != 3) { change = temp_Str[b]; temp_Str[b] = 3; temp_Sorce = 0;//diem dis //begin tinh hamming for (int i = 0; i < 20; ++i) { tempRow = 999; for (int j = i * 600; j < (i + 1) * 600 - l; ++j) { tempSubRow = 0; for (int k = 0; k < l; k++) { if (temp_Str[k] != d_datainp[k + j]) tempSubRow++; } if (tempSubRow < tempRow) tempRow = tempSubRow; } temp_Sorce += tempRow; } //end tinh hamming cho chuoi vao //kiem tra dis motif Ne if (temp_Sorce < motif_NeSorce) { motif_NeSorce = temp_Sorce; for (int c = 0; c < l; ++c) { motif_NeString[c] = temp_Str[c]; } } temp_Str[b] = change; } } }//end Ne //end tinh int dem = 0; int res = 0; for (int i = 0; i < l; ++i) { res = res | (ansMotif_string[i] << dem); dem += 2; if (index == 574) printf("%d ", ansMotif_string[i]); } ans[index] = res; } } int main() { File_Input(); //test /*string test = "GTTCGGCGT"; Motif_Ans testMoitf = dis_hamming(test); fo << testMoitf.dis << endl; cout<<sDataInp[0].substr(574, l) << endl; cout << h_dataMotif[574] << endl;*/ //end test int h_dataOut[ARRAY_SIZE_OUT]; for (int i = 0; i < 600; ++i) { h_dataOut[i] = -1; } //GPU khoi tao bien va bo nho int *d_dataMotif; if (cudaMalloc(&d_dataMotif, ARRAY_BYTES_INP) != cudaSuccess) { cout << "error allocating memory!" << endl; return 0; } int *d_dataOut; if (cudaMalloc(&d_dataOut, ARRAY_BYTES_OUT) != cudaSuccess) { cout << "error allocating memory!" << endl; cudaFree(d_dataMotif); return 0; } if (cudaMemcpy(d_dataMotif, h_dataMotif, ARRAY_BYTES_INP, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "error copying memory!" << endl; cudaFree(d_dataMotif); cudaFree(d_dataOut); return 0; } if (cudaMemcpy(d_dataOut, h_dataOut, ARRAY_BYTES_OUT, cudaMemcpyHostToDevice) != cudaSuccess) { cout << "error copying memory!" << endl; cudaFree(d_dataMotif); cudaFree(d_dataOut); return 0; } cout << "dang chay ...." 
<< endl; //khoi tao chay cuda int threadsPerBlock = 256; int blocksPerGrid = (600 + threadsPerBlock - 1) / threadsPerBlock; patternBarching <<<blocksPerGrid, threadsPerBlock >>> (d_dataMotif, l, d, d_dataOut); fo << "\nTime " << clock() / (double)1000 << " Sec" << endl; //copy data tro ve if (cudaMemcpy(h_dataOut, d_dataOut, ARRAY_BYTES_OUT, cudaMemcpyDeviceToHost) != cudaSuccess) { cout << "error copying memory!" << endl; cudaFree(d_dataMotif); cudaFree(d_dataOut); return 0; } //lay best motif cout << "\n du lieu tra ve" << endl; Motif_Ans best_motif,temp_motif_return; best_motif.dis = 999; for (int i = 0; i < 600; i++) { int chuyenStr = h_dataOut[i]; int k = 0; string res = ""; //cout << chuyenStr << endl; if (chuyenStr != -1) { //chuyen kieu in sang string for (int j = 0; j < l; ++j) { int temp = (chuyenStr >> k) & 3; //cout << temp << ' '; switch (temp) { case 0: { res += 'A'; break; } case 1: { res += 'C'; break; } case 2: { res += 'G'; break; } case 3: { res += 'T'; break; } } k += 2; } //if (i == 574) fo << res << endl; //ket thuc chuyen //kiem tra do dai va tra vi tri temp_motif_return = dis_hamming(res); if (temp_motif_return.dis < best_motif.dis) { cout << "thay doi best" << endl; best_motif.dis = temp_motif_return.dis; best_motif.motif = temp_motif_return.motif; for (int z = 0; z < 20; ++z) { best_motif.adress[z] = temp_motif_return.adress[z]; } } //end kiem tra cout << "------------" << endl; cout << temp_motif_return.motif << endl; cout << temp_motif_return.dis << endl; cout << best_motif.motif << endl; cout << best_motif.dis << endl; cout << "+++++++++++++" << endl; } } fo << "Best motif: " << best_motif.motif << endl << "Motif location: " << endl; for (int z = 0; z < 20; ++z) { fo << best_motif.adress[z] << ' '; } cout << "xong" << endl; fo << "\nEnd Time " << clock() / (double)1000 << " Sec" << endl; cudaFree(d_dataMotif); cudaFree(d_dataOut); return 0; }
f16164e069f9198526ce6d0054c928b02c4fb0e9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "lin_gpu.h"

// first version of matrix multiplication
// use nr_C*nc_C threads to compute A*B
extern "C"
void mat_mult_gpu_v1(float *gA, size_t nr_A, size_t nc_A,
                     float *gB, size_t nr_B, size_t nc_B,
                     float **gC, size_t *nr_C, size_t *nc_C)
{
    // set dimension of C
    *nr_C = nr_A;
    *nc_C = nc_B;

    // compute how many threads and blocks are needed
    size_t num_cell = nr_A*nc_B;
    size_t num_block = (num_cell-1)/block_size+1;

    // compute grid dimension
    size_t num_grid_y = (num_block-1)/max_grid_x+1;
    size_t num_grid_x = num_block < max_grid_x ? num_block : max_grid_x;

    // allocate memory for resulting matrix
    float *tmp_gC;
    void *gptr;
    hipError_t crc = hipMalloc(&gptr, num_cell*sizeof(float));
    if(crc) {
        printf("hipMalloc Error=%d:%s\n", crc, hipGetErrorString(crc));
        exit(1);
    }
    tmp_gC = (float*) gptr;

    // set up kernel and run
    dim3 dimBlock(block_size, 1);
    dim3 dimGrid(num_grid_x, num_grid_y);
    hipLaunchKernelGGL(( mat_mult_gpu_v1_kernel), dim3(dimGrid),dim3(dimBlock), 0, 0, gA, nr_A, nc_A, gB, nr_B, nc_B, tmp_gC, *nr_C, *nc_C);

    // check kernel result
    hipDeviceSynchronize();
    crc = hipGetLastError();
    if(crc) {
        printf("emptyKernel error=%d:%s\n", crc, hipGetErrorString(crc));
        exit(1);
    }

    // save result
    *gC = tmp_gC;
}

// kernel function for mat_mult_gpu_v1
__global__ void mat_mult_gpu_v1_kernel(float *gA, size_t nr_A, size_t nc_A,
                                       float *gB, size_t nr_B, size_t nc_B,
                                       float *gC, size_t nr_C, size_t nc_C)
{
    // get absoluate idx of thread
    size_t j = threadIdx.x+blockDim.x*(blockIdx.x+gridDim.x*blockIdx.y);

    // check if j is within range
    if(j < nr_C*nc_C) {
        // obtain row and column of the cell thread j to compute
        size_t r = j / nc_C;
        size_t c = j % nc_C;

        // compute the inner product of r-th row of A and c-th column of B
        float val = 0.0;
        for(size_t i=0; i<nc_A; i++) {
            val += gA[r*nc_A+i]*gB[i*nc_B+c];
        }

        // save results
        gC[j] = val;
    }
}
f16164e069f9198526ce6d0054c928b02c4fb0e9.cu
#include "lin_gpu.h" // first version of matrix multiplication // use nr_C*nc_C threads to compute A*B extern "C" void mat_mult_gpu_v1(float *gA, size_t nr_A, size_t nc_A, float *gB, size_t nr_B, size_t nc_B, float **gC, size_t *nr_C, size_t *nc_C) { // set dimension of C *nr_C = nr_A; *nc_C = nc_B; // compute how many threads and blocks are needed size_t num_cell = nr_A*nc_B; size_t num_block = (num_cell-1)/block_size+1; // compute grid dimension size_t num_grid_y = (num_block-1)/max_grid_x+1; size_t num_grid_x = num_block < max_grid_x ? num_block : max_grid_x; // allocate memory for resulting matrix float *tmp_gC; void *gptr; cudaError_t crc = cudaMalloc(&gptr, num_cell*sizeof(float)); if(crc) { printf("cudaMalloc Error=%d:%s\n", crc, cudaGetErrorString(crc)); exit(1); } tmp_gC = (float*) gptr; // set up kernel and run dim3 dimBlock(block_size, 1); dim3 dimGrid(num_grid_x, num_grid_y); mat_mult_gpu_v1_kernel<<<dimGrid,dimBlock>>>(gA, nr_A, nc_A, gB, nr_B, nc_B, tmp_gC, *nr_C, *nc_C); // check kernel result cudaThreadSynchronize(); crc = cudaGetLastError(); if(crc) { printf("emptyKernel error=%d:%s\n", crc, cudaGetErrorString(crc)); exit(1); } // save result *gC = tmp_gC; } // kernel function for mat_mult_gpu_v1 __global__ void mat_mult_gpu_v1_kernel(float *gA, size_t nr_A, size_t nc_A, float *gB, size_t nr_B, size_t nc_B, float *gC, size_t nr_C, size_t nc_C) { // get absoluate idx of thread size_t j = threadIdx.x+blockDim.x*(blockIdx.x+gridDim.x*blockIdx.y); // check if j is within range if(j < nr_C*nc_C) { // obtain row and column of the cell thread j to compute size_t r = j / nc_C; size_t c = j % nc_C; // compute the inner product of r-th row of A and c-th column of B float val = 0.0; for(size_t i=0; i<nc_A; i++) { val += gA[r*nc_A+i]*gB[i*nc_B+c]; } // save results gC[j] = val; } }
29659f47be19736b4a55f40323596e97b0462257.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <omp.h> #include <stdlib.h> #include <math.h> #include <array> #include <vector> #include <sstream> #include <fstream> #include <chrono> #include <hiprand/hiprand_kernel.h> #include <limits> #include <memory> #include <cstddef> #include <type_traits> #include "../include/musket.cuh" #include "../include/BPP_0.cuh" #include "Randoms.cpp" Randoms *randoms; const int BETA = 1; const double EVAPORATION = 0.5; const int TAUMAX = 2; const int Q = 32; int itemtypes = 50; int itemcount = 59; auto bin_capacity = 1000; bool PRINT = true; bool PALMA = true; struct Copybppitemsquantity_map_index_in_place_array_functor{ Copybppitemsquantity_map_index_in_place_array_functor(const mkt::DArray<int>& _bpp_items_quantity) : bpp_items_quantity(_bpp_items_quantity){} ~Copybppitemsquantity_map_index_in_place_array_functor() {} __device__ auto operator()(int indexx, int valuee){ int new_index = ((indexx) % (itemtypess)); // printf("%d;%d;%d;%d;%d\n", indexx, itemtypess, valuee, new_index, bpp_items_quantity.get_global((new_index))); return bpp_items_quantity.get_global((new_index)); } void init(int device){ bpp_items_quantity.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; mkt::DeviceArray<int> bpp_items_quantity; }; struct Copybppitemsweight_map_index_in_place_array_functor{ Copybppitemsweight_map_index_in_place_array_functor(const mkt::DArray<int>& _bpp_items_weight) : bpp_items_weight(_bpp_items_weight){} ~Copybppitemsweight_map_index_in_place_array_functor() {} __device__ auto operator()(int indexx, int valuee){ int new_index = ((indexx) % (itemtypess)); return bpp_items_weight.get_global((new_index))/* TODO: For multiple GPUs*/; } void init(int device){ bpp_items_weight.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; mkt::DeviceArray<int> bpp_items_weight; }; struct Packing_kernel_map_index_in_place_array_functor{ Packing_kernel_map_index_in_place_array_functor(const mkt::DArray<int>& _d_bins, const mkt::DArray<int>& _copy_bpp_items_quantity, const mkt::DArray<int>& _bpp_items_quantity, const mkt::DArray<double>& _d_eta, const mkt::DArray<double>& _d_tau, const mkt::DArray<double>& _d_probabilities, const mkt::DArray<int>& _bpp_items_weight, const mkt::DArray<double>& _d_phero, hiprandState_t* _d_rand_states_ind) : d_bins(_d_bins), copy_bpp_items_quantity(_copy_bpp_items_quantity), bpp_items_quantity(_bpp_items_quantity), d_eta(_d_eta), d_tau(_d_tau), d_probabilities(_d_probabilities), bpp_items_weight(_bpp_items_weight), d_phero(_d_phero), d_rand_states_ind(_d_rand_states_ind){} ~Packing_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, int y){ int ant_index = (iindex); int object_bin_index = ((ant_index) * (itemcountt)); int bins_used = 0; int bpp_items_prefix = (ant_index) * (itemtypess); int object_weightmax = 0; int actual_bin_weight = 0; int n_items_in_actual_bin = 0; int possible_items_to_this_bin = 0; int object_index = 0; int object_quantity = 0; int new_object_weight = 0; //ADD heaviest Object - ok for (int i = 0; ((i) < (itemtypess)); i++) { copy_bpp_items_quantity.set_global(bpp_items_prefix + i, bpp_items_quantity.get_global(i)); new_object_weight = bpp_items_weight.get_global(i); object_quantity = copy_bpp_items_quantity.get_global(i); if((object_quantity > 0) && (new_object_weight > object_weightmax)){ object_index = i; object_weightmax = new_object_weight; } } 
d_bins.set_global(((ant_index) * (itemcountt)), (object_index)); copy_bpp_items_quantity.set_global(((bpp_items_prefix) + (object_index)), (copy_bpp_items_quantity.get_global(((bpp_items_prefix) + (object_index))) - 1)); n_items_in_actual_bin++; actual_bin_weight += (object_weightmax); bins_used++; int weight_object_j; int object_i; int quantity_object_j; for (int i = 0; ((i) < ((itemcountt) - 1)); i++) { double eta_tau_sum = 0.0; possible_items_to_this_bin = 0; //Search POssible Items for (int j = 0; ((j) < (itemtypess)); j++) { d_eta.set_global(((bpp_items_prefix) + (j)), 0.0); d_tau.set_global(((bpp_items_prefix) + (j)), 0.0); d_probabilities.set_global(((bpp_items_prefix) + (j)), 0.0); weight_object_j = bpp_items_weight.get_global((j)); quantity_object_j = copy_bpp_items_quantity.get_global(bpp_items_prefix + j); if (((quantity_object_j) > 0) && ((weight_object_j) <= ((bin_capacity2) - (actual_bin_weight)))) { for (int k = 0; ((k) < (n_items_in_actual_bin)); k++) { object_i = d_bins.get_global((((object_bin_index) + (i)) - (k))); d_eta.set_global(((bpp_items_prefix) + (j)), d_phero.get_global(object_i * (int) itemtypess + j)); } d_eta.set_global(((bpp_items_prefix) + (j)), (d_eta.get_global(((bpp_items_prefix) + (j))) / (n_items_in_actual_bin))); d_tau.set_global(((bpp_items_prefix) + (j)), (double) pow(weight_object_j, BETA)); eta_tau_sum = eta_tau_sum + (d_eta.get_global(((bpp_items_prefix) + (j))) * d_tau.get_global(((bpp_items_prefix) + (j)))); possible_items_to_this_bin++; } } if (((possible_items_to_this_bin) > 0)) { //Calculate Probabilities for (int j = 0; ((j) < (itemtypess)); j++) { double tmp = d_eta.get_global(bpp_items_prefix + j); double tmp2 = d_tau.get_global(bpp_items_prefix + j); double thisthat = ((tmp * tmp2) / (eta_tau_sum)); d_probabilities.set_global((bpp_items_prefix + j), thisthat); } eta_tau_sum = 0.0; //Perform probabilistic selection double random = hiprand_uniform(&d_rand_states_ind[ant_index]); int select_index = 0; int object_j = 0; double sum = 0.0; double prob = 0.0; while ((sum <= random) && (select_index < itemtypess)){ prob = d_probabilities.get_global(bpp_items_prefix+select_index); if(prob > 0.0){ sum += prob; object_j = select_index; } select_index++; } d_bins.set_global(ant_index * (int) itemcountt + i + 1, (object_j)); weight_object_j = bpp_items_weight.get_global(object_j); actual_bin_weight += (weight_object_j); copy_bpp_items_quantity.set_global((bpp_items_prefix + object_j),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_j) - 1)); n_items_in_actual_bin++; } else { bins_used++; object_index = 0; object_weightmax = 0; for (int k = 0; ((k) < (itemtypess)); k++) { object_quantity = copy_bpp_items_quantity.get_global((bpp_items_prefix + k)); new_object_weight = bpp_items_weight.get_global((k)); if (((object_quantity) > 0) && (((new_object_weight) > (object_weightmax)))) { object_index = (k); object_weightmax = (new_object_weight); } } copy_bpp_items_quantity.set_global((bpp_items_prefix + object_index), (copy_bpp_items_quantity.get_global(bpp_items_prefix + object_index) - 1)); d_bins.set_global(((((ant_index) * ((itemcountt))) + (i)) + 1), (object_index)); n_items_in_actual_bin = 1; actual_bin_weight = (object_weightmax); // if(ant_index == 0){ // printf("\n New Bin %i: \n\t Add %i - Weight %i",bins_used, object_index, object_weightmax); // } } } return (bins_used); } void init(int device){ d_bins.init(device); copy_bpp_items_quantity.init(device); bpp_items_quantity.init(device); d_eta.init(device); d_tau.init(device); 
d_probabilities.init(device); bpp_items_weight.init(device); d_phero.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int object_weight; int itemtypess; int itemcountt; int BETA2; int bin_capacity2; hiprandState_t* d_rand_states_ind; mkt::DeviceArray<int> d_bins; mkt::DeviceArray<int> copy_bpp_items_quantity; mkt::DeviceArray<int> bpp_items_quantity; mkt::DeviceArray<double> d_eta; mkt::DeviceArray<double> d_tau; mkt::DeviceArray<double> d_probabilities; mkt::DeviceArray<int> bpp_items_weight; mkt::DeviceArray<double> d_phero; }; struct Evaporation_kernel_map_index_in_place_array_functor{ Evaporation_kernel_map_index_in_place_array_functor(const mkt::DArray<double>& _d_phero) : d_phero(_d_phero){} ~Evaporation_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, double y){ double result = 0.0; double RO = (EVAPORATION2); if((((iindex) % (itemtypess)) != 0)){ result = ((1 - (RO)) * d_phero.get_global((iindex))/* TODO: For multiple GPUs*/); } return (result); } void init(int device){ d_phero.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; double EVAPORATION2; mkt::DeviceArray<double> d_phero; }; struct Update_pheromones_kernel_map_index_in_place_array_functor{ Update_pheromones_kernel_map_index_in_place_array_functor(const mkt::DArray<int>& _d_fitness, const mkt::DArray<int>& _d_bins, const mkt::DArray<double>& _d_phero, const mkt::DArray<int>& _bpp_items_weight) : d_fitness(_d_fitness), d_bins(_d_bins), d_phero(_d_phero), bpp_items_weight(_bpp_items_weight){} ~Update_pheromones_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, int value){ int ant_index = (iindex); double ant_fitness = (d_fitness.get_global((ant_index))); double actual_bin_weight = 0.0; int actual_bin_object_index = 0; int actual_bin_n_objects = 0; for (int i = 0; ((i) < (itemcountt)); i++) { int object_i = d_bins.get_global((((ant_index) * (itemcountt)) + (i))); double object_weight = bpp_items_weight.get_global(object_i); if ((((actual_bin_weight) + (object_weight)) <= (bin_capacity2))) { actual_bin_n_objects = ((actual_bin_n_objects) + 1); actual_bin_weight = ((actual_bin_weight) + (object_weight)); } else { for (int j = 0; ((j) < (actual_bin_n_objects)); j++) { for (int k = ((j) + 1); ((k) < (actual_bin_n_objects)); k++) { int object_i = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (j))); int object_j = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (k))); double delta_pheromone = ((Q) / ant_fitness); d_phero.set_global((((object_i) * (itemtypee)) + (object_j)), ((delta_pheromone) + d_phero.get_global((((object_i) * (itemtypee)) + (object_j))))); d_phero.set_global((((object_j) * (itemtypee)) + (object_i)), ((delta_pheromone) + d_phero.get_global((((object_j) * (itemtypee)) + (object_i))))); } } actual_bin_n_objects = 1; actual_bin_weight = (object_weight); actual_bin_object_index = (i); } } //printf("%d;", value); return (value); } void init(int device){ d_fitness.init(device); d_bins.init(device); d_phero.init(device); bpp_items_weight.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemcountt; int itemtypee; int bin_capacity2; mkt::DeviceArray<int> d_fitness; mkt::DeviceArray<int> d_bins; mkt::DeviceArray<double> d_phero; mkt::DeviceArray<int> bpp_items_weight; }; template<> int mkt::reduce_min<int>(mkt::DArray<int>& a){ int local_result = std::numeric_limits<int>::max(); const int 
gpu_elements = a.get_size_gpu(); int threads = gpu_elements < 1024 ? gpu_elements : 1024; // nextPow2 int blocks = (gpu_elements + threads - 1) / threads; //hipSetDevice(0); int* d_odata; hipMalloc((void**) &d_odata, blocks * sizeof(int)); int* devptr = a.get_device_pointer(0); mkt::kernel::reduce_min_call(gpu_elements, devptr, d_odata, threads, blocks, mkt::cuda_streams[0], 0); // fold on gpus: step 2 while(blocks > 1){ int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2 int blocks_2 = (blocks + threads_2 - 1) / threads_2; mkt::kernel::reduce_min_call(blocks, d_odata, d_odata, threads_2, blocks_2, mkt::cuda_streams[0], 0); blocks = blocks_2; } // copy final sum from device to host hipMemcpyAsync(&local_result, d_odata, sizeof(int), hipMemcpyDeviceToHost, mkt::cuda_streams[0]); mkt::sync_streams(); hipFree(d_odata); return local_result; } __global__ void setup_rand_kernel(hiprandState_t * state, unsigned long seed) { int id = blockIdx.x * blockDim.x + threadIdx.x; hiprand_init(seed, id, 0, &state[id]); // hiprand_init(1234, id, 0, &state[id]); } int main(int argc, char** argv) { mkt::init(); char *n_iterationschar = argv[1]; int n_iterations = atoi(n_iterationschar); char *problemchar = argv[2]; int problem = atoi(problemchar); char *antschar = argv[3]; int ants = atoi(antschar); randoms = new Randoms(15); std::ifstream fileReader; //Problem Instances std::string file_to_read = ""; //Problem Instances //std::string f60 = "/home/n/n_herr03/BPP/BPP/source/bpp/Falkenauer_t60_00.txt"; //std::string p201 = "/home/n/n_herr03/BPP/BPP/source/bpp/201_2500_NR_0.txt"; //std::string p402 = "/home/n/n_herr03/BPP/BPP/source/bpp/402_10000_NR_0.txt"; //std::string p600 = "/home/n/n_herr03/BPP/BPP/source/bpp/600_20000_NR_0.txt"; //std::string p801 = "/home/n/n_herr03/BPP/BPP/source/bpp/801_40000_NR_0.txt"; //std::string p1002 = "/home/n/n_herr03/BPP/BPP/source/bpp/1002_80000_NR_0.txt"; //if(PALMA){- std::string f60 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/Falkenauer_t60_00.txt"; std::string p201 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/201_2500_NR_0.txt"; std::string p402 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/402_10000_NR_0.txt"; std::string p600 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/600_20000_NR_0.txt"; std::string p801 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/801_40000_NR_0.txt"; std::string p1002 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/1002_80000_NR_0.txt"; //+ //} switch(problem){ case 0: fileReader.open(f60, std::ifstream::in); break; case 1: fileReader.open(p201, std::ifstream::in); break; case 2: fileReader.open(p402, std::ifstream::in); break; case 3: fileReader.open(p600, std::ifstream::in); break; case 4: fileReader.open(p801, std::ifstream::in); break; case 5: fileReader.open(p1002, std::ifstream::in); break; default: break; } if (fileReader.is_open()) { fileReader >> itemtypes; fileReader >> bin_capacity; } fileReader.close(); int pheromone_matrix_size = itemtypes * itemtypes; mkt::DArray<double> d_phero(0, pheromone_matrix_size, pheromone_matrix_size, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> d_fitness(0, ants, ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_probabilities(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_eta(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_tau(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 
0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> bpp_items_quantity(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> copy_bpp_items_quantity(0, itemtypes*ants, itemtypes*ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY); hiprandState_t* d_rand_states_ind; hipMalloc(&d_rand_states_ind, ants * sizeof(hiprandState_t)); hipLaunchKernelGGL(( setup_rand_kernel), dim3(ants), dim3(1), 0, 0, d_rand_states_ind, time(NULL)); d_fitness.update_devices(); d_probabilities.update_devices(); d_eta.update_devices(); d_tau.update_devices(); double randn; for(int j = 0;j<itemtypes;j++){ for(int k = 0;k<itemtypes;k++){ randn = randoms -> Uniforme() * TAUMAX; d_phero[(j*itemtypes) + k] = randn; d_phero[(k*itemtypes) + j] = randn; } } d_phero.update_devices(); int lines = 0; double total = 0.0; switch(problem){ case 0: fileReader.open(f60, std::ifstream::in); break; case 1: fileReader.open(p201, std::ifstream::in); break; case 2: fileReader.open(p402, std::ifstream::in); break; case 3: fileReader.open(p600, std::ifstream::in); break; case 4: fileReader.open(p801, std::ifstream::in); break; case 5: fileReader.open(p1002, std::ifstream::in); break; default: break; } if (fileReader.is_open()) { fileReader >> itemtypes; fileReader >> bin_capacity; while (lines < itemtypes && !fileReader.eof()) { double weight; double quantity; fileReader >> weight; fileReader >> quantity; bpp_items_weight[lines] = weight; bpp_items_quantity[lines] = quantity; total+=quantity; lines++; } } else{ printf("\nFile not opened"); } bpp_items_weight.update_devices(); bpp_items_quantity.update_devices(); itemcount = total; (PRINT && !PALMA)?printf("\nSetup Description"):printf(""); (PRINT && !PALMA)?printf("\n\tObject Types: %d" , itemtypes):printf(""); (PRINT && !PALMA)?printf("\n\tObject Total: %d" , itemcount):printf(""); (PRINT && !PALMA)?printf("\n\tAnts: %d \n\tProblem %d:\n", ants, problem):printf(""); fileReader.close(); (PRINT && !PALMA)?printf("\t\t%d itemstypes \n\t\t%d items \n\t\t%d capacity\n\n", itemtypes, itemcount, (bin_capacity)):printf(""); mkt::sync_streams(); std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now(); int best_fitness = 999999; printf("\n%d; %d; %d; %d;", ants, problem,itemtypes,itemcount); mkt::DArray<int> d_bins(0, ants*itemcount, ants*itemcount, 0, 1, 0, 0, mkt::DIST, mkt::COPY); d_bins.update_devices(); Copybppitemsquantity_map_index_in_place_array_functor copybppitemsquantity_map_index_in_place_array_functor{bpp_items_quantity}; Copybppitemsweight_map_index_in_place_array_functor copybppitemsweight_map_index_in_place_array_functor{bpp_items_weight}; Packing_kernel_map_index_in_place_array_functor packing_kernel_map_index_in_place_array_functor{d_bins, copy_bpp_items_quantity, bpp_items_quantity, d_eta, d_tau, d_probabilities, bpp_items_weight, d_phero, d_rand_states_ind}; Evaporation_kernel_map_index_in_place_array_functor evaporation_kernel_map_index_in_place_array_functor{d_phero}; Update_pheromones_kernel_map_index_in_place_array_functor update_pheromones_kernel_map_index_in_place_array_functor{d_fitness, d_bins, d_phero, bpp_items_weight}; int BLOCK_SIZE = 256; int n_blocks = ants / BLOCK_SIZE; int n_threads = ants / n_blocks; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); float milliseconds = 0; double packt = 0.0; mkt::sync_streams(); std::chrono::high_resolution_clock::time_point complete_timer_start = 
std::chrono::high_resolution_clock::now(); for(int iterate = 0; ((iterate) < (n_iterations)); iterate++){ int maxobject = 0; packing_kernel_map_index_in_place_array_functor.object_weight = (maxobject); packing_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes); packing_kernel_map_index_in_place_array_functor.itemcountt = (itemcount); packing_kernel_map_index_in_place_array_functor.BETA2 = (BETA); packing_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity); mkt::map_index_in_place<int, Packing_kernel_map_index_in_place_array_functor>(d_fitness, packing_kernel_map_index_in_place_array_functor, n_threads, n_blocks); evaporation_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes); evaporation_kernel_map_index_in_place_array_functor.EVAPORATION2 = (EVAPORATION); mkt::map_index_in_place<double, Evaporation_kernel_map_index_in_place_array_functor>(d_phero, evaporation_kernel_map_index_in_place_array_functor, itemtypes, itemtypes); int new_best_fitness = mkt::reduce_min<int>(d_fitness); if (best_fitness > new_best_fitness) best_fitness = new_best_fitness; update_pheromones_kernel_map_index_in_place_array_functor.itemtypee = (itemtypes); update_pheromones_kernel_map_index_in_place_array_functor.itemcountt = (itemcount); update_pheromones_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity); mkt::map_index_in_place<int, Update_pheromones_kernel_map_index_in_place_array_functor>(d_fitness, update_pheromones_kernel_map_index_in_place_array_functor, n_threads, n_blocks); //(PRINT && !PALMA)?printf("\nBest Fitness (Number of bins used) Iteration %d: %d", iterate, best_fitness):printf(""); } mkt::sync_streams(); std::chrono::high_resolution_clock::time_point complete_timer_end = std::chrono::high_resolution_clock::now(); double complete_seconds = std::chrono::duration<double>(complete_timer_end - complete_timer_start).count(); if (PRINT & !PALMA) { printf("\nResults:"); printf("\n\tSeconds: %.5f;", complete_seconds); printf("\n\tFitness: %d;\n", best_fitness); } if (PALMA) { printf("%.5f; ", complete_seconds); printf(" %d;", best_fitness); } return EXIT_SUCCESS; }
29659f47be19736b4a55f40323596e97b0462257.cu
#include <cuda.h> #include <omp.h> #include <stdlib.h> #include <math.h> #include <array> #include <vector> #include <sstream> #include <fstream> #include <chrono> #include <curand_kernel.h> #include <limits> #include <memory> #include <cstddef> #include <type_traits> #include "../include/musket.cuh" #include "../include/BPP_0.cuh" #include "Randoms.cpp" Randoms *randoms; const int BETA = 1; const double EVAPORATION = 0.5; const int TAUMAX = 2; const int Q = 32; int itemtypes = 50; int itemcount = 59; auto bin_capacity = 1000; bool PRINT = true; bool PALMA = true; struct Copybppitemsquantity_map_index_in_place_array_functor{ Copybppitemsquantity_map_index_in_place_array_functor(const mkt::DArray<int>& _bpp_items_quantity) : bpp_items_quantity(_bpp_items_quantity){} ~Copybppitemsquantity_map_index_in_place_array_functor() {} __device__ auto operator()(int indexx, int valuee){ int new_index = ((indexx) % (itemtypess)); // printf("%d;%d;%d;%d;%d\n", indexx, itemtypess, valuee, new_index, bpp_items_quantity.get_global((new_index))); return bpp_items_quantity.get_global((new_index)); } void init(int device){ bpp_items_quantity.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; mkt::DeviceArray<int> bpp_items_quantity; }; struct Copybppitemsweight_map_index_in_place_array_functor{ Copybppitemsweight_map_index_in_place_array_functor(const mkt::DArray<int>& _bpp_items_weight) : bpp_items_weight(_bpp_items_weight){} ~Copybppitemsweight_map_index_in_place_array_functor() {} __device__ auto operator()(int indexx, int valuee){ int new_index = ((indexx) % (itemtypess)); return bpp_items_weight.get_global((new_index))/* TODO: For multiple GPUs*/; } void init(int device){ bpp_items_weight.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; mkt::DeviceArray<int> bpp_items_weight; }; struct Packing_kernel_map_index_in_place_array_functor{ Packing_kernel_map_index_in_place_array_functor(const mkt::DArray<int>& _d_bins, const mkt::DArray<int>& _copy_bpp_items_quantity, const mkt::DArray<int>& _bpp_items_quantity, const mkt::DArray<double>& _d_eta, const mkt::DArray<double>& _d_tau, const mkt::DArray<double>& _d_probabilities, const mkt::DArray<int>& _bpp_items_weight, const mkt::DArray<double>& _d_phero, curandState* _d_rand_states_ind) : d_bins(_d_bins), copy_bpp_items_quantity(_copy_bpp_items_quantity), bpp_items_quantity(_bpp_items_quantity), d_eta(_d_eta), d_tau(_d_tau), d_probabilities(_d_probabilities), bpp_items_weight(_bpp_items_weight), d_phero(_d_phero), d_rand_states_ind(_d_rand_states_ind){} ~Packing_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, int y){ int ant_index = (iindex); int object_bin_index = ((ant_index) * (itemcountt)); int bins_used = 0; int bpp_items_prefix = (ant_index) * (itemtypess); int object_weightmax = 0; int actual_bin_weight = 0; int n_items_in_actual_bin = 0; int possible_items_to_this_bin = 0; int object_index = 0; int object_quantity = 0; int new_object_weight = 0; //ADD heaviest Object - ok for (int i = 0; ((i) < (itemtypess)); i++) { copy_bpp_items_quantity.set_global(bpp_items_prefix + i, bpp_items_quantity.get_global(i)); new_object_weight = bpp_items_weight.get_global(i); object_quantity = copy_bpp_items_quantity.get_global(i); if((object_quantity > 0) && (new_object_weight > object_weightmax)){ object_index = i; object_weightmax = new_object_weight; } } d_bins.set_global(((ant_index) * (itemcountt)), (object_index)); 
copy_bpp_items_quantity.set_global(((bpp_items_prefix) + (object_index)), (copy_bpp_items_quantity.get_global(((bpp_items_prefix) + (object_index))) - 1)); n_items_in_actual_bin++; actual_bin_weight += (object_weightmax); bins_used++; int weight_object_j; int object_i; int quantity_object_j; for (int i = 0; ((i) < ((itemcountt) - 1)); i++) { double eta_tau_sum = 0.0; possible_items_to_this_bin = 0; //Search POssible Items for (int j = 0; ((j) < (itemtypess)); j++) { d_eta.set_global(((bpp_items_prefix) + (j)), 0.0); d_tau.set_global(((bpp_items_prefix) + (j)), 0.0); d_probabilities.set_global(((bpp_items_prefix) + (j)), 0.0); weight_object_j = bpp_items_weight.get_global((j)); quantity_object_j = copy_bpp_items_quantity.get_global(bpp_items_prefix + j); if (((quantity_object_j) > 0) && ((weight_object_j) <= ((bin_capacity2) - (actual_bin_weight)))) { for (int k = 0; ((k) < (n_items_in_actual_bin)); k++) { object_i = d_bins.get_global((((object_bin_index) + (i)) - (k))); d_eta.set_global(((bpp_items_prefix) + (j)), d_phero.get_global(object_i * (int) itemtypess + j)); } d_eta.set_global(((bpp_items_prefix) + (j)), (d_eta.get_global(((bpp_items_prefix) + (j))) / (n_items_in_actual_bin))); d_tau.set_global(((bpp_items_prefix) + (j)), (double) pow(weight_object_j, BETA)); eta_tau_sum = eta_tau_sum + (d_eta.get_global(((bpp_items_prefix) + (j))) * d_tau.get_global(((bpp_items_prefix) + (j)))); possible_items_to_this_bin++; } } if (((possible_items_to_this_bin) > 0)) { //Calculate Probabilities for (int j = 0; ((j) < (itemtypess)); j++) { double tmp = d_eta.get_global(bpp_items_prefix + j); double tmp2 = d_tau.get_global(bpp_items_prefix + j); double thisthat = ((tmp * tmp2) / (eta_tau_sum)); d_probabilities.set_global((bpp_items_prefix + j), thisthat); } eta_tau_sum = 0.0; //Perform probabilistic selection double random = curand_uniform(&d_rand_states_ind[ant_index]); int select_index = 0; int object_j = 0; double sum = 0.0; double prob = 0.0; while ((sum <= random) && (select_index < itemtypess)){ prob = d_probabilities.get_global(bpp_items_prefix+select_index); if(prob > 0.0){ sum += prob; object_j = select_index; } select_index++; } d_bins.set_global(ant_index * (int) itemcountt + i + 1, (object_j)); weight_object_j = bpp_items_weight.get_global(object_j); actual_bin_weight += (weight_object_j); copy_bpp_items_quantity.set_global((bpp_items_prefix + object_j),(copy_bpp_items_quantity.get_global(bpp_items_prefix + object_j) - 1)); n_items_in_actual_bin++; } else { bins_used++; object_index = 0; object_weightmax = 0; for (int k = 0; ((k) < (itemtypess)); k++) { object_quantity = copy_bpp_items_quantity.get_global((bpp_items_prefix + k)); new_object_weight = bpp_items_weight.get_global((k)); if (((object_quantity) > 0) && (((new_object_weight) > (object_weightmax)))) { object_index = (k); object_weightmax = (new_object_weight); } } copy_bpp_items_quantity.set_global((bpp_items_prefix + object_index), (copy_bpp_items_quantity.get_global(bpp_items_prefix + object_index) - 1)); d_bins.set_global(((((ant_index) * ((itemcountt))) + (i)) + 1), (object_index)); n_items_in_actual_bin = 1; actual_bin_weight = (object_weightmax); // if(ant_index == 0){ // printf("\n New Bin %i: \n\t Add %i - Weight %i",bins_used, object_index, object_weightmax); // } } } return (bins_used); } void init(int device){ d_bins.init(device); copy_bpp_items_quantity.init(device); bpp_items_quantity.init(device); d_eta.init(device); d_tau.init(device); d_probabilities.init(device); bpp_items_weight.init(device); 
d_phero.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int object_weight; int itemtypess; int itemcountt; int BETA2; int bin_capacity2; curandState* d_rand_states_ind; mkt::DeviceArray<int> d_bins; mkt::DeviceArray<int> copy_bpp_items_quantity; mkt::DeviceArray<int> bpp_items_quantity; mkt::DeviceArray<double> d_eta; mkt::DeviceArray<double> d_tau; mkt::DeviceArray<double> d_probabilities; mkt::DeviceArray<int> bpp_items_weight; mkt::DeviceArray<double> d_phero; }; struct Evaporation_kernel_map_index_in_place_array_functor{ Evaporation_kernel_map_index_in_place_array_functor(const mkt::DArray<double>& _d_phero) : d_phero(_d_phero){} ~Evaporation_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, double y){ double result = 0.0; double RO = (EVAPORATION2); if((((iindex) % (itemtypess)) != 0)){ result = ((1 - (RO)) * d_phero.get_global((iindex))/* TODO: For multiple GPUs*/); } return (result); } void init(int device){ d_phero.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemtypess; double EVAPORATION2; mkt::DeviceArray<double> d_phero; }; struct Update_pheromones_kernel_map_index_in_place_array_functor{ Update_pheromones_kernel_map_index_in_place_array_functor(const mkt::DArray<int>& _d_fitness, const mkt::DArray<int>& _d_bins, const mkt::DArray<double>& _d_phero, const mkt::DArray<int>& _bpp_items_weight) : d_fitness(_d_fitness), d_bins(_d_bins), d_phero(_d_phero), bpp_items_weight(_bpp_items_weight){} ~Update_pheromones_kernel_map_index_in_place_array_functor() {} __device__ auto operator()(int iindex, int value){ int ant_index = (iindex); double ant_fitness = (d_fitness.get_global((ant_index))); double actual_bin_weight = 0.0; int actual_bin_object_index = 0; int actual_bin_n_objects = 0; for (int i = 0; ((i) < (itemcountt)); i++) { int object_i = d_bins.get_global((((ant_index) * (itemcountt)) + (i))); double object_weight = bpp_items_weight.get_global(object_i); if ((((actual_bin_weight) + (object_weight)) <= (bin_capacity2))) { actual_bin_n_objects = ((actual_bin_n_objects) + 1); actual_bin_weight = ((actual_bin_weight) + (object_weight)); } else { for (int j = 0; ((j) < (actual_bin_n_objects)); j++) { for (int k = ((j) + 1); ((k) < (actual_bin_n_objects)); k++) { int object_i = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (j))); int object_j = d_bins.get_global(((((ant_index) * (itemcountt)) + (actual_bin_object_index)) + (k))); double delta_pheromone = ((Q) / ant_fitness); d_phero.set_global((((object_i) * (itemtypee)) + (object_j)), ((delta_pheromone) + d_phero.get_global((((object_i) * (itemtypee)) + (object_j))))); d_phero.set_global((((object_j) * (itemtypee)) + (object_i)), ((delta_pheromone) + d_phero.get_global((((object_j) * (itemtypee)) + (object_i))))); } } actual_bin_n_objects = 1; actual_bin_weight = (object_weight); actual_bin_object_index = (i); } } //printf("%d;", value); return (value); } void init(int device){ d_fitness.init(device); d_bins.init(device); d_phero.init(device); bpp_items_weight.init(device); } size_t get_smem_bytes(){ size_t result = 0; return result; } int itemcountt; int itemtypee; int bin_capacity2; mkt::DeviceArray<int> d_fitness; mkt::DeviceArray<int> d_bins; mkt::DeviceArray<double> d_phero; mkt::DeviceArray<int> bpp_items_weight; }; template<> int mkt::reduce_min<int>(mkt::DArray<int>& a){ int local_result = std::numeric_limits<int>::max(); const int gpu_elements = a.get_size_gpu(); int threads = gpu_elements < 
1024 ? gpu_elements : 1024; // nextPow2 int blocks = (gpu_elements + threads - 1) / threads; //cudaSetDevice(0); int* d_odata; cudaMalloc((void**) &d_odata, blocks * sizeof(int)); int* devptr = a.get_device_pointer(0); mkt::kernel::reduce_min_call(gpu_elements, devptr, d_odata, threads, blocks, mkt::cuda_streams[0], 0); // fold on gpus: step 2 while(blocks > 1){ int threads_2 = blocks < 1024 ? blocks : 1024; // nextPow2 int blocks_2 = (blocks + threads_2 - 1) / threads_2; mkt::kernel::reduce_min_call(blocks, d_odata, d_odata, threads_2, blocks_2, mkt::cuda_streams[0], 0); blocks = blocks_2; } // copy final sum from device to host cudaMemcpyAsync(&local_result, d_odata, sizeof(int), cudaMemcpyDeviceToHost, mkt::cuda_streams[0]); mkt::sync_streams(); cudaFree(d_odata); return local_result; } __global__ void setup_rand_kernel(curandState * state, unsigned long seed) { int id = blockIdx.x * blockDim.x + threadIdx.x; curand_init(seed, id, 0, &state[id]); // curand_init(1234, id, 0, &state[id]); } int main(int argc, char** argv) { mkt::init(); char *n_iterationschar = argv[1]; int n_iterations = atoi(n_iterationschar); char *problemchar = argv[2]; int problem = atoi(problemchar); char *antschar = argv[3]; int ants = atoi(antschar); randoms = new Randoms(15); std::ifstream fileReader; //Problem Instances std::string file_to_read = ""; //Problem Instances //std::string f60 = "/home/n/n_herr03/BPP/BPP/source/bpp/Falkenauer_t60_00.txt"; //std::string p201 = "/home/n/n_herr03/BPP/BPP/source/bpp/201_2500_NR_0.txt"; //std::string p402 = "/home/n/n_herr03/BPP/BPP/source/bpp/402_10000_NR_0.txt"; //std::string p600 = "/home/n/n_herr03/BPP/BPP/source/bpp/600_20000_NR_0.txt"; //std::string p801 = "/home/n/n_herr03/BPP/BPP/source/bpp/801_40000_NR_0.txt"; //std::string p1002 = "/home/n/n_herr03/BPP/BPP/source/bpp/1002_80000_NR_0.txt"; //if(PALMA){- std::string f60 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/Falkenauer_t60_00.txt"; std::string p201 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/201_2500_NR_0.txt"; std::string p402 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/402_10000_NR_0.txt"; std::string p600 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/600_20000_NR_0.txt"; std::string p801 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/801_40000_NR_0.txt"; std::string p1002 = "/home/schredder/Research/HLPP/2020/ACO_Breno/BPP/MusketProgram/1002_80000_NR_0.txt"; //+ //} switch(problem){ case 0: fileReader.open(f60, std::ifstream::in); break; case 1: fileReader.open(p201, std::ifstream::in); break; case 2: fileReader.open(p402, std::ifstream::in); break; case 3: fileReader.open(p600, std::ifstream::in); break; case 4: fileReader.open(p801, std::ifstream::in); break; case 5: fileReader.open(p1002, std::ifstream::in); break; default: break; } if (fileReader.is_open()) { fileReader >> itemtypes; fileReader >> bin_capacity; } fileReader.close(); int pheromone_matrix_size = itemtypes * itemtypes; mkt::DArray<double> d_phero(0, pheromone_matrix_size, pheromone_matrix_size, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> d_fitness(0, ants, ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_probabilities(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_eta(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<double> d_tau(0, ants*itemtypes, ants*itemtypes, 0.0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> 
bpp_items_weight(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> bpp_items_quantity(0, itemtypes, itemtypes, 0, 1, 0, 0, mkt::DIST, mkt::COPY); mkt::DArray<int> copy_bpp_items_quantity(0, itemtypes*ants, itemtypes*ants, 0, 1, 0, 0, mkt::DIST, mkt::COPY); curandState* d_rand_states_ind; cudaMalloc(&d_rand_states_ind, ants * sizeof(curandState)); setup_rand_kernel<<<ants, 1, 0>>>(d_rand_states_ind, time(NULL)); d_fitness.update_devices(); d_probabilities.update_devices(); d_eta.update_devices(); d_tau.update_devices(); double randn; for(int j = 0;j<itemtypes;j++){ for(int k = 0;k<itemtypes;k++){ randn = randoms -> Uniforme() * TAUMAX; d_phero[(j*itemtypes) + k] = randn; d_phero[(k*itemtypes) + j] = randn; } } d_phero.update_devices(); int lines = 0; double total = 0.0; switch(problem){ case 0: fileReader.open(f60, std::ifstream::in); break; case 1: fileReader.open(p201, std::ifstream::in); break; case 2: fileReader.open(p402, std::ifstream::in); break; case 3: fileReader.open(p600, std::ifstream::in); break; case 4: fileReader.open(p801, std::ifstream::in); break; case 5: fileReader.open(p1002, std::ifstream::in); break; default: break; } if (fileReader.is_open()) { fileReader >> itemtypes; fileReader >> bin_capacity; while (lines < itemtypes && !fileReader.eof()) { double weight; double quantity; fileReader >> weight; fileReader >> quantity; bpp_items_weight[lines] = weight; bpp_items_quantity[lines] = quantity; total+=quantity; lines++; } } else{ printf("\nFile not opened"); } bpp_items_weight.update_devices(); bpp_items_quantity.update_devices(); itemcount = total; (PRINT && !PALMA)?printf("\nSetup Description"):printf(""); (PRINT && !PALMA)?printf("\n\tObject Types: %d" , itemtypes):printf(""); (PRINT && !PALMA)?printf("\n\tObject Total: %d" , itemcount):printf(""); (PRINT && !PALMA)?printf("\n\tAnts: %d \n\tProblem %d:\n", ants, problem):printf(""); fileReader.close(); (PRINT && !PALMA)?printf("\t\t%d itemstypes \n\t\t%d items \n\t\t%d capacity\n\n", itemtypes, itemcount, (bin_capacity)):printf(""); mkt::sync_streams(); std::chrono::high_resolution_clock::time_point timer_start = std::chrono::high_resolution_clock::now(); int best_fitness = 999999; printf("\n%d; %d; %d; %d;", ants, problem,itemtypes,itemcount); mkt::DArray<int> d_bins(0, ants*itemcount, ants*itemcount, 0, 1, 0, 0, mkt::DIST, mkt::COPY); d_bins.update_devices(); Copybppitemsquantity_map_index_in_place_array_functor copybppitemsquantity_map_index_in_place_array_functor{bpp_items_quantity}; Copybppitemsweight_map_index_in_place_array_functor copybppitemsweight_map_index_in_place_array_functor{bpp_items_weight}; Packing_kernel_map_index_in_place_array_functor packing_kernel_map_index_in_place_array_functor{d_bins, copy_bpp_items_quantity, bpp_items_quantity, d_eta, d_tau, d_probabilities, bpp_items_weight, d_phero, d_rand_states_ind}; Evaporation_kernel_map_index_in_place_array_functor evaporation_kernel_map_index_in_place_array_functor{d_phero}; Update_pheromones_kernel_map_index_in_place_array_functor update_pheromones_kernel_map_index_in_place_array_functor{d_fitness, d_bins, d_phero, bpp_items_weight}; int BLOCK_SIZE = 256; int n_blocks = ants / BLOCK_SIZE; int n_threads = ants / n_blocks; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float milliseconds = 0; double packt = 0.0; mkt::sync_streams(); std::chrono::high_resolution_clock::time_point complete_timer_start = std::chrono::high_resolution_clock::now(); for(int iterate = 0; ((iterate) < (n_iterations)); 
iterate++){ int maxobject = 0; packing_kernel_map_index_in_place_array_functor.object_weight = (maxobject); packing_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes); packing_kernel_map_index_in_place_array_functor.itemcountt = (itemcount); packing_kernel_map_index_in_place_array_functor.BETA2 = (BETA); packing_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity); mkt::map_index_in_place<int, Packing_kernel_map_index_in_place_array_functor>(d_fitness, packing_kernel_map_index_in_place_array_functor, n_threads, n_blocks); evaporation_kernel_map_index_in_place_array_functor.itemtypess = (itemtypes); evaporation_kernel_map_index_in_place_array_functor.EVAPORATION2 = (EVAPORATION); mkt::map_index_in_place<double, Evaporation_kernel_map_index_in_place_array_functor>(d_phero, evaporation_kernel_map_index_in_place_array_functor, itemtypes, itemtypes); int new_best_fitness = mkt::reduce_min<int>(d_fitness); if (best_fitness > new_best_fitness) best_fitness = new_best_fitness; update_pheromones_kernel_map_index_in_place_array_functor.itemtypee = (itemtypes); update_pheromones_kernel_map_index_in_place_array_functor.itemcountt = (itemcount); update_pheromones_kernel_map_index_in_place_array_functor.bin_capacity2 = (bin_capacity); mkt::map_index_in_place<int, Update_pheromones_kernel_map_index_in_place_array_functor>(d_fitness, update_pheromones_kernel_map_index_in_place_array_functor, n_threads, n_blocks); //(PRINT && !PALMA)?printf("\nBest Fitness (Number of bins used) Iteration %d: %d", iterate, best_fitness):printf(""); } mkt::sync_streams(); std::chrono::high_resolution_clock::time_point complete_timer_end = std::chrono::high_resolution_clock::now(); double complete_seconds = std::chrono::duration<double>(complete_timer_end - complete_timer_start).count(); if (PRINT & !PALMA) { printf("\nResults:"); printf("\n\tSeconds: %.5f;", complete_seconds); printf("\n\tFitness: %d;\n", best_fitness); } if (PALMA) { printf("%.5f; ", complete_seconds); printf(" %d;", best_fitness); } return EXIT_SUCCESS; }
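A minimal standalone sketch (illustrative, not one of the repository files above; the hiprand header path and the sizes used are assumptions that may vary by ROCm version) of the translation pattern this pair demonstrates: the CUDA launch setup_rand_kernel<<<ants, 1, 0>>>(...) becomes hipLaunchKernelGGL(...), and curandState/cudaMalloc become hiprandState_t/hipMalloc.

#include <hip/hip_runtime.h>
#include <hiprand_kernel.h>   // hipify maps curand_kernel.h here; exact path may differ by ROCm install
#include <ctime>

__global__ void setup_rand_kernel(hiprandState_t* state, unsigned long seed) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    hiprand_init(seed, id, 0, &state[id]);   // curand_init -> hiprand_init
}

int main() {
    const int ants = 128;                    // illustrative size, not taken from the files above
    hiprandState_t* d_states = nullptr;
    hipMalloc(&d_states, ants * sizeof(hiprandState_t));      // cudaMalloc -> hipMalloc
    // CUDA form:  setup_rand_kernel<<<ants, 1, 0>>>(d_states, time(NULL));
    hipLaunchKernelGGL(setup_rand_kernel, dim3(ants), dim3(1), 0, 0, d_states, time(NULL));
    hipDeviceSynchronize();
    hipFree(d_states);
    return 0;
}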
02285b98ecc9453d95a9594746f4628ff39c0bea.hip
// !!! This is a file automatically generated by hipify!!! #include "hipacc_cu.hpp" #include "cuGaussianFilterXY.hip" #include <algorithm> #include <cmath> #include <cstdlib> #include <iostream> #include <vector> #include <sys/time.h> #include <opencv2/opencv.hpp> //--- #ifndef BH_MODE # define BH_MODE CLAMP #endif #define PIXEL_CAST(a) (pixel_t)(a) #define TMP_PIXEL_CAST(a) (tmp_pixel_t)(a) #define pixel_t uchar #define tmp_pixel_t float #define USE_LAMBDA // get time in milliseconds double time_ms () { struct timeval tv; gettimeofday (&tv, NULL); return ((double)(tv.tv_sec) * 1e+3 + (double)(tv.tv_usec) * 1e-3); } // Gaussian blur filter in Hipacc #ifdef NO_SEP #else class GaussianFilterRow : public Kernel<tmp_pixel_t> { private: Accessor<pixel_t> &input; Mask<float> &mask; const int size; public: GaussianFilterRow(IterationSpace<tmp_pixel_t> &iter, Accessor<pixel_t> &input, Mask<float> &mask, const int size) : Kernel(iter), input(input), mask(mask), size(size) { add_accessor(&input); } #ifdef USE_LAMBDA void kernel() { output() = TMP_PIXEL_CAST(convolve(mask, Reduce::SUM, [&] () -> float { return mask() * input(mask); })); } #else void kernel() { const int anchor = size >> 1; float sum = 0.0f; for (int xf = -anchor; xf<=anchor; ++xf) { sum += mask(xf, 0) * input(xf, 0); } output() = TMP_PIXEL_CAST(sum); } #endif }; class GaussianFilterColumn: public Kernel<pixel_t> { private: Accessor<tmp_pixel_t> &input; Mask<float> &mask; const int size; public: GaussianFilterColumn(IterationSpace<pixel_t> &iter, Accessor<tmp_pixel_t> &input, Mask<float> &mask, const int size) : Kernel(iter), input(input), mask(mask), size(size) { add_accessor(&input); } #ifdef USE_LAMBDA void kernel() { output() = PIXEL_CAST(convolve(mask, Reduce::SUM, [&] () -> float { return mask() * input(mask); }) + 0.5f); } #else void kernel() { const int anchor = size >> 1; float sum = 0.5f; for (int yf = -anchor; yf<=anchor; ++yf) { sum += mask(0, yf) * input(0, yf); } output() = PIXEL_CAST(sum); } #endif }; #endif int main(int argc, const char *argv[]) { hipaccInitCUDA(); const int size_x = SIZE_X; const int size_y = SIZE_Y; const double sigma1 = ((size_x-1)*0.5 - 1)*0.3 + 0.8; const double sigma2 = ((size_y-1)*0.5 - 1)*0.3 + 0.8; // filter coefficients #ifdef CONST_MASK // only filter kernel sizes 3x3, 5x5, and 7x7 implemented if (size_x != size_y || !(size_x == 3 || size_x == 5 || size_x == 7)) { std::cerr << "Wrong filter kernel size. Currently supported values: 3x3, 5x5, and 7x7!" 
<< std::endl; exit(EXIT_FAILURE); } // convolution filter mask const float filter_x[1][SIZE_X] = { #if SIZE_X == 3 { 0.238994f, 0.522011f, 0.238994f } #endif #if SIZE_X == 5 { 0.070766f, 0.244460f, 0.369546f, 0.244460f, 0.070766f } #endif #if SIZE_X == 7 { 0.028995f, 0.103818f, 0.223173f, 0.288026f, 0.223173f, 0.103818f, 0.028995f } #endif }; const float filter_y[SIZE_Y][1] = { #if SIZE_Y == 3 { 0.238994f }, { 0.522011f }, { 0.238994f } #endif #if SIZE_Y == 5 { 0.070766f }, { 0.244460f }, { 0.369546f }, { 0.244460f }, { 0.070766f } #endif #if SIZE_Y == 7 { 0.028995f }, { 0.103818f }, { 0.223173f }, { 0.288026f }, { 0.223173f }, { 0.103818f }, { 0.028995f } #endif }; const float filter_xy[SIZE_Y][SIZE_X] = { #if SIZE_X == 3 { 0.057118f, 0.124758f, 0.057118f }, { 0.124758f, 0.272496f, 0.124758f }, { 0.057118f, 0.124758f, 0.057118f } #endif #if SIZE_X == 5 { 0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f }, { 0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f }, { 0.026151f, 0.090339f, 0.136565f, 0.090339f, 0.026151f }, { 0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f }, { 0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f } #endif #if SIZE_X == 7 { 0.000841, 0.003010, 0.006471, 0.008351, 0.006471, 0.003010, 0.000841 }, { 0.003010, 0.010778, 0.023169, 0.029902, 0.023169, 0.010778, 0.003010 }, { 0.006471, 0.023169, 0.049806, 0.064280, 0.049806, 0.023169, 0.006471 }, { 0.008351, 0.029902, 0.064280, 0.082959, 0.064280, 0.029902, 0.008351 }, { 0.006471, 0.023169, 0.049806, 0.064280, 0.049806, 0.023169, 0.006471 }, { 0.003010, 0.010778, 0.023169, 0.029902, 0.023169, 0.010778, 0.003010 }, { 0.000841, 0.003010, 0.006471, 0.008351, 0.006471, 0.003010, 0.000841 } #endif }; #else float filter_x[1][SIZE_X]; float filter_y[SIZE_Y][1]; float filter_xy[SIZE_Y][SIZE_X]; double scale2X = -0.5/(sigma1*sigma1); double scale2Y = -0.5/(sigma2*sigma2); double sum_x = 0; double sum_y = 0; for (int i=0; i < size_x; ++i) { double x = i - (size_x-1)*0.5; double t = ::exp(scale2X*x*x); filter_x[0][i] = (float)t; sum_x += filter_x[0][i]; } for (int i=0; i < size_y; ++i) { double x = i - (size_y-1)*0.5; double t = ::exp(scale2Y*x*x); filter_y[i][0] = (float)t; sum_y += filter_y[i][0]; } sum_x = 1./sum_x; sum_y = 1./sum_y; for (int i=0; i < size_x; ++i) { filter_x[0][i] = (float)(filter_x[0][i]*sum_x); } for (int i=0; i < size_y; ++i) { filter_y[i][0] = (float)(filter_y[i][0]*sum_y); } for (int y=0; y < size_y; ++y) { for (int x=0; x < size_x; ++x) { filter_xy[y][x] = filter_x[0][x]*filter_y[y][0]; } } #endif cv::Mat frame, frame_colored; frame_colored = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR); cvtColor(frame_colored, frame, CV_BGR2GRAY); const int width = frame.cols; const int height = frame.rows; float timing = 0; // input and output image of width x height pixels HipaccImage input = hipaccCreateMemory<uchar>(NULL, width, height, 256); HipaccImage output = hipaccCreateMemory<uchar>(NULL, width, height, 256); HipaccImage tmp = hipaccCreateMemory<float>(NULL, width, height, 256); hipaccWriteMemory(input, frame.data); // filter mask hipaccWriteSymbol<float>((const void *)&_constmaskXY, (float *)filter_xy, 5, 5); HipaccAccessor iter_out(output); HipaccAccessor iter_tmp(tmp); #ifdef NO_SEP HipaccAccessor acc(input); hipacc_launch_info XY_info0(2, 2, iter_out, 8, 1); dim3 block0(32, 1); dim3 grid0(hipaccCalcGridFromBlock(XY_info0, block0)); hipaccPrepareKernelLaunch(XY_info0, block0); hipaccConfigureCall(grid0, block0); size_t offset0 = 0; hipGetTextureReference(&_texinputXYRef, &_texinputXY); 
hipaccBindTexture<uchar>(Linear1D, _texinputXYRef, acc.img); hipaccSetupArgument(&iter_out.img.mem, sizeof(uchar *), offset0); hipaccSetupArgument(&iter_out.width, sizeof(const int), offset0); hipaccSetupArgument(&iter_out.height, sizeof(const int), offset0); hipaccSetupArgument(&iter_out.img.stride, sizeof(const int), offset0); hipaccSetupArgument(&acc.width, sizeof(const int), offset0); hipaccSetupArgument(&acc.height, sizeof(const int), offset0); hipaccSetupArgument(&acc.img.stride, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_left, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_right, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_top, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_bottom, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_fall_back, sizeof(const int), offset0); hipaccLaunchKernel((const void *)&cuGaussianFilterXYKernel, "cuGaussianFilterXYKernel", grid0, block0); timing = hipacc_last_kernel_timing(); #else BoundaryCondition<pixel_t> cond_in(input, mask_x, Boundary::BH_MODE); Accessor<pixel_t> acc(cond_in); GaussianFilterRow X(iter_tmp, acc, mask_x, size_x); BoundaryCondition<tmp_pixel_t> cond_tmp(tmp, mask_y, Boundary::BH_MODE); Accessor<tmp_pixel_t> acc_tmp(cond_tmp); GaussianFilterColumn Y(iter_out, acc_tmp, mask_y, size_y); X.execute(); timing = hipacc_last_kernel_timing(); Y.execute(); timing += hipacc_last_kernel_timing(); #endif std::cerr << "Timing: " << timing << " ms, " << (width*height/timing)/1000 << " Mpixel/s" << std::endl; // OpenCV display image std::string outputfn; std::vector<int> compression_params; frame.data = hipaccReadMemory<uchar>(output); outputfn = argv[1]; outputfn = outputfn.substr(0, outputfn.find_last_of(".")) + "-filtered.jpg"; compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); compression_params.push_back(100); try { cv::imwrite(outputfn, frame, compression_params); } catch(std::runtime_error &ex) { fprintf(stderr, "JPEG compression exception: %s\n", ex.what()); return 1; } cv::namedWindow("Result", cv::WINDOW_NORMAL); cv::imshow("Result", frame); cv::waitKey(0); fprintf(stdout, "Done!\n"); hipaccReleaseMemory<uchar>(output); hipaccReleaseMemory<float>(tmp); hipaccReleaseMemory<uchar>(input); return 0; }
02285b98ecc9453d95a9594746f4628ff39c0bea.cu
#include "hipacc_cu.hpp" #include "cuGaussianFilterXY.cu" #include <algorithm> #include <cmath> #include <cstdlib> #include <iostream> #include <vector> #include <sys/time.h> #include <opencv2/opencv.hpp> //--- #ifndef BH_MODE # define BH_MODE CLAMP #endif #define PIXEL_CAST(a) (pixel_t)(a) #define TMP_PIXEL_CAST(a) (tmp_pixel_t)(a) #define pixel_t uchar #define tmp_pixel_t float #define USE_LAMBDA // get time in milliseconds double time_ms () { struct timeval tv; gettimeofday (&tv, NULL); return ((double)(tv.tv_sec) * 1e+3 + (double)(tv.tv_usec) * 1e-3); } // Gaussian blur filter in Hipacc #ifdef NO_SEP #else class GaussianFilterRow : public Kernel<tmp_pixel_t> { private: Accessor<pixel_t> &input; Mask<float> &mask; const int size; public: GaussianFilterRow(IterationSpace<tmp_pixel_t> &iter, Accessor<pixel_t> &input, Mask<float> &mask, const int size) : Kernel(iter), input(input), mask(mask), size(size) { add_accessor(&input); } #ifdef USE_LAMBDA void kernel() { output() = TMP_PIXEL_CAST(convolve(mask, Reduce::SUM, [&] () -> float { return mask() * input(mask); })); } #else void kernel() { const int anchor = size >> 1; float sum = 0.0f; for (int xf = -anchor; xf<=anchor; ++xf) { sum += mask(xf, 0) * input(xf, 0); } output() = TMP_PIXEL_CAST(sum); } #endif }; class GaussianFilterColumn: public Kernel<pixel_t> { private: Accessor<tmp_pixel_t> &input; Mask<float> &mask; const int size; public: GaussianFilterColumn(IterationSpace<pixel_t> &iter, Accessor<tmp_pixel_t> &input, Mask<float> &mask, const int size) : Kernel(iter), input(input), mask(mask), size(size) { add_accessor(&input); } #ifdef USE_LAMBDA void kernel() { output() = PIXEL_CAST(convolve(mask, Reduce::SUM, [&] () -> float { return mask() * input(mask); }) + 0.5f); } #else void kernel() { const int anchor = size >> 1; float sum = 0.5f; for (int yf = -anchor; yf<=anchor; ++yf) { sum += mask(0, yf) * input(0, yf); } output() = PIXEL_CAST(sum); } #endif }; #endif int main(int argc, const char *argv[]) { hipaccInitCUDA(); const int size_x = SIZE_X; const int size_y = SIZE_Y; const double sigma1 = ((size_x-1)*0.5 - 1)*0.3 + 0.8; const double sigma2 = ((size_y-1)*0.5 - 1)*0.3 + 0.8; // filter coefficients #ifdef CONST_MASK // only filter kernel sizes 3x3, 5x5, and 7x7 implemented if (size_x != size_y || !(size_x == 3 || size_x == 5 || size_x == 7)) { std::cerr << "Wrong filter kernel size. Currently supported values: 3x3, 5x5, and 7x7!" 
<< std::endl; exit(EXIT_FAILURE); } // convolution filter mask const float filter_x[1][SIZE_X] = { #if SIZE_X == 3 { 0.238994f, 0.522011f, 0.238994f } #endif #if SIZE_X == 5 { 0.070766f, 0.244460f, 0.369546f, 0.244460f, 0.070766f } #endif #if SIZE_X == 7 { 0.028995f, 0.103818f, 0.223173f, 0.288026f, 0.223173f, 0.103818f, 0.028995f } #endif }; const float filter_y[SIZE_Y][1] = { #if SIZE_Y == 3 { 0.238994f }, { 0.522011f }, { 0.238994f } #endif #if SIZE_Y == 5 { 0.070766f }, { 0.244460f }, { 0.369546f }, { 0.244460f }, { 0.070766f } #endif #if SIZE_Y == 7 { 0.028995f }, { 0.103818f }, { 0.223173f }, { 0.288026f }, { 0.223173f }, { 0.103818f }, { 0.028995f } #endif }; const float filter_xy[SIZE_Y][SIZE_X] = { #if SIZE_X == 3 { 0.057118f, 0.124758f, 0.057118f }, { 0.124758f, 0.272496f, 0.124758f }, { 0.057118f, 0.124758f, 0.057118f } #endif #if SIZE_X == 5 { 0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f }, { 0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f }, { 0.026151f, 0.090339f, 0.136565f, 0.090339f, 0.026151f }, { 0.017300f, 0.059761f, 0.090339f, 0.059761f, 0.017300f }, { 0.005008f, 0.017300f, 0.026151f, 0.017300f, 0.005008f } #endif #if SIZE_X == 7 { 0.000841, 0.003010, 0.006471, 0.008351, 0.006471, 0.003010, 0.000841 }, { 0.003010, 0.010778, 0.023169, 0.029902, 0.023169, 0.010778, 0.003010 }, { 0.006471, 0.023169, 0.049806, 0.064280, 0.049806, 0.023169, 0.006471 }, { 0.008351, 0.029902, 0.064280, 0.082959, 0.064280, 0.029902, 0.008351 }, { 0.006471, 0.023169, 0.049806, 0.064280, 0.049806, 0.023169, 0.006471 }, { 0.003010, 0.010778, 0.023169, 0.029902, 0.023169, 0.010778, 0.003010 }, { 0.000841, 0.003010, 0.006471, 0.008351, 0.006471, 0.003010, 0.000841 } #endif }; #else float filter_x[1][SIZE_X]; float filter_y[SIZE_Y][1]; float filter_xy[SIZE_Y][SIZE_X]; double scale2X = -0.5/(sigma1*sigma1); double scale2Y = -0.5/(sigma2*sigma2); double sum_x = 0; double sum_y = 0; for (int i=0; i < size_x; ++i) { double x = i - (size_x-1)*0.5; double t = std::exp(scale2X*x*x); filter_x[0][i] = (float)t; sum_x += filter_x[0][i]; } for (int i=0; i < size_y; ++i) { double x = i - (size_y-1)*0.5; double t = std::exp(scale2Y*x*x); filter_y[i][0] = (float)t; sum_y += filter_y[i][0]; } sum_x = 1./sum_x; sum_y = 1./sum_y; for (int i=0; i < size_x; ++i) { filter_x[0][i] = (float)(filter_x[0][i]*sum_x); } for (int i=0; i < size_y; ++i) { filter_y[i][0] = (float)(filter_y[i][0]*sum_y); } for (int y=0; y < size_y; ++y) { for (int x=0; x < size_x; ++x) { filter_xy[y][x] = filter_x[0][x]*filter_y[y][0]; } } #endif cv::Mat frame, frame_colored; frame_colored = cv::imread(argv[1], CV_LOAD_IMAGE_COLOR); cvtColor(frame_colored, frame, CV_BGR2GRAY); const int width = frame.cols; const int height = frame.rows; float timing = 0; // input and output image of width x height pixels HipaccImage input = hipaccCreateMemory<uchar>(NULL, width, height, 256); HipaccImage output = hipaccCreateMemory<uchar>(NULL, width, height, 256); HipaccImage tmp = hipaccCreateMemory<float>(NULL, width, height, 256); hipaccWriteMemory(input, frame.data); // filter mask hipaccWriteSymbol<float>((const void *)&_constmaskXY, (float *)filter_xy, 5, 5); HipaccAccessor iter_out(output); HipaccAccessor iter_tmp(tmp); #ifdef NO_SEP HipaccAccessor acc(input); hipacc_launch_info XY_info0(2, 2, iter_out, 8, 1); dim3 block0(32, 1); dim3 grid0(hipaccCalcGridFromBlock(XY_info0, block0)); hipaccPrepareKernelLaunch(XY_info0, block0); hipaccConfigureCall(grid0, block0); size_t offset0 = 0; cudaGetTextureReference(&_texinputXYRef, &_texinputXY); 
hipaccBindTexture<uchar>(Linear1D, _texinputXYRef, acc.img); hipaccSetupArgument(&iter_out.img.mem, sizeof(uchar *), offset0); hipaccSetupArgument(&iter_out.width, sizeof(const int), offset0); hipaccSetupArgument(&iter_out.height, sizeof(const int), offset0); hipaccSetupArgument(&iter_out.img.stride, sizeof(const int), offset0); hipaccSetupArgument(&acc.width, sizeof(const int), offset0); hipaccSetupArgument(&acc.height, sizeof(const int), offset0); hipaccSetupArgument(&acc.img.stride, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_left, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_right, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_top, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_start_bottom, sizeof(const int), offset0); hipaccSetupArgument(&XY_info0.bh_fall_back, sizeof(const int), offset0); hipaccLaunchKernel((const void *)&cuGaussianFilterXYKernel, "cuGaussianFilterXYKernel", grid0, block0); timing = hipacc_last_kernel_timing(); #else BoundaryCondition<pixel_t> cond_in(input, mask_x, Boundary::BH_MODE); Accessor<pixel_t> acc(cond_in); GaussianFilterRow X(iter_tmp, acc, mask_x, size_x); BoundaryCondition<tmp_pixel_t> cond_tmp(tmp, mask_y, Boundary::BH_MODE); Accessor<tmp_pixel_t> acc_tmp(cond_tmp); GaussianFilterColumn Y(iter_out, acc_tmp, mask_y, size_y); X.execute(); timing = hipacc_last_kernel_timing(); Y.execute(); timing += hipacc_last_kernel_timing(); #endif std::cerr << "Timing: " << timing << " ms, " << (width*height/timing)/1000 << " Mpixel/s" << std::endl; // OpenCV display image std::string outputfn; std::vector<int> compression_params; frame.data = hipaccReadMemory<uchar>(output); outputfn = argv[1]; outputfn = outputfn.substr(0, outputfn.find_last_of(".")) + "-filtered.jpg"; compression_params.push_back(CV_IMWRITE_JPEG_QUALITY); compression_params.push_back(100); try { cv::imwrite(outputfn, frame, compression_params); } catch(std::runtime_error &ex) { fprintf(stderr, "JPEG compression exception: %s\n", ex.what()); return 1; } cv::namedWindow("Result", cv::WINDOW_NORMAL); cv::imshow("Result", frame); cv::waitKey(0); fprintf(stdout, "Done!\n"); hipaccReleaseMemory<uchar>(output); hipaccReleaseMemory<float>(tmp); hipaccReleaseMemory<uchar>(input); return 0; }
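A small host-only sketch (illustrative, not part of the Hipacc sources above) of what the #else branch computes: the OpenCV-style sigma derived from the mask size, Gaussian weights, and normalization so the taps sum to 1; for size 5 it reproduces the CONST_MASK row 0.070766 / 0.244460 / 0.369546.

#include <cmath>
#include <cstdio>
#include <vector>

std::vector<float> gaussian_mask_1d(int size) {
    const double sigma = ((size - 1) * 0.5 - 1) * 0.3 + 0.8;   // same sigma rule as sigma1/sigma2
    const double scale = -0.5 / (sigma * sigma);
    std::vector<float> mask(size);
    double sum = 0.0;
    for (int i = 0; i < size; ++i) {
        double x = i - (size - 1) * 0.5;
        mask[i] = (float)std::exp(scale * x * x);
        sum += mask[i];
    }
    for (int i = 0; i < size; ++i)
        mask[i] = (float)(mask[i] / sum);                       // normalize: coefficients sum to 1
    return mask;
}

int main() {
    for (float w : gaussian_mask_1d(5)) std::printf("%f ", w);  // 0.070766 0.244460 0.369546 ...
    std::printf("\n");
    return 0;
}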
8720da49b28354498c26ac26e361d8ca9cd85d99.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (target_value == 1) { /* positive */ loss[i] = input_data[i] * (1 - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } else { /* negative */ loss[i] = input_data[i] * (0 - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } } } template <typename Dtype> __global__ void SigmoidCrossEntropyLossBackwardGPU(const int nthreads, Dtype* diff, const Dtype* target, double weight_pos, double weight_neg) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (target_value == 1) { /* positive */ diff[i] *= weight_pos; } else { /* negative */ diff[i] *= weight_neg; } } } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); const int count = bottom[0]->count(); const int dim = count / bottom[0]->channels(); Dtype* loss_data = bottom[0]->mutable_gpu_diff(); hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data); Dtype count_pos = 0; Dtype count_neg = 0; double loss_pos = 0; double loss_neg = 0; const Dtype* target_cpu = bottom[1]->cpu_data(); /* calculate beta */ for (int i = 0; i < dim; i++) { if (target_cpu[i] == 1) count_pos++; else count_neg++; } weight_pos_ = 1.0 * count_neg / (count_pos + count_neg); weight_neg_ = 1.0 * count_pos / (count_pos + count_neg); /* calculate loss for positive and negative pixels */ const Dtype* loss_data_cpu = bottom[0]->cpu_diff(); for (int i = 0; i < dim; i++) { if (target_cpu[i] == 1) loss_pos -= (double)loss_data_cpu[i]; else loss_neg -= (double)loss_data_cpu[i]; } loss_pos *= weight_pos_; loss_neg *= weight_neg_; top[0]->mutable_cpu_data()[0] = (loss_pos * 1 + loss_neg); } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_sub(count, sigmoid_output_data, target, bottom_diff); int dim = bottom[0]->count() / bottom[0]->num(); hipLaunchKernelGGL(( SigmoidCrossEntropyLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_diff, target, (double)weight_pos_, (double)weight_neg_); const Dtype loss_weight = top 
[0]->cpu_diff()[0]; caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe
8720da49b28354498c26ac26e361d8ca9cd85d99.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (target_value == 1) { /* positive */ loss[i] = input_data[i] * (1 - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } else { /* negative */ loss[i] = input_data[i] * (0 - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); } } } template <typename Dtype> __global__ void SigmoidCrossEntropyLossBackwardGPU(const int nthreads, Dtype* diff, const Dtype* target, double weight_pos, double weight_neg) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (target_value == 1) { /* positive */ diff[i] *= weight_pos; } else { /* negative */ diff[i] *= weight_neg; } } } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_); const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); const int count = bottom[0]->count(); const int dim = count / bottom[0]->channels(); Dtype* loss_data = bottom[0]->mutable_gpu_diff(); SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data); Dtype count_pos = 0; Dtype count_neg = 0; double loss_pos = 0; double loss_neg = 0; const Dtype* target_cpu = bottom[1]->cpu_data(); /* calculate beta */ for (int i = 0; i < dim; i++) { if (target_cpu[i] == 1) count_pos++; else count_neg++; } weight_pos_ = 1.0 * count_neg / (count_pos + count_neg); weight_neg_ = 1.0 * count_pos / (count_pos + count_neg); /* calculate loss for positive and negative pixels */ const Dtype* loss_data_cpu = bottom[0]->cpu_diff(); for (int i = 0; i < dim; i++) { if (target_cpu[i] == 1) loss_pos -= (double)loss_data_cpu[i]; else loss_neg -= (double)loss_data_cpu[i]; } loss_pos *= weight_pos_; loss_neg *= weight_neg_; top[0]->mutable_cpu_data()[0] = (loss_pos * 1 + loss_neg); } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const int num = bottom[0]->num(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_gpu_sub(count, sigmoid_output_data, target, bottom_diff); int dim = bottom[0]->count() / bottom[0]->num(); SigmoidCrossEntropyLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, bottom_diff, target, (double)weight_pos_, (double)weight_neg_); const Dtype loss_weight = top [0]->cpu_diff()[0]; caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe
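A host-side sketch (illustrative only) of the numerically stable expression the forward kernel above uses: the per-element log-likelihood t*x - log(1 + exp(x)) (negated later in Forward_gpu) is rewritten as x*(t - (x >= 0)) - log(1 + exp(x - 2*x*(x >= 0))), so a large positive x is never exponentiated.

#include <cmath>
#include <cstdio>

double stable_term(double x, int t) {           // matches the kernel's per-element expression
    int s = (x >= 0);
    return x * (t - s) - std::log(1.0 + std::exp(x - 2.0 * x * s));
}

int main() {
    double x = 3.0; int t = 1;
    double naive = t * x - std::log(1.0 + std::exp(x));         // same value for moderate x
    std::printf("naive=%f stable=%f\n", naive, stable_term(x, t));
    std::printf("x=750: stable=%f\n", stable_term(750.0, t));   // naive form would overflow exp(750)
    return 0;
}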
26305ba4d271e76f471974e439953b5650c55f53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal d -> s @author Peng Du */ #include "common_magma.h" #define qmod(a, b) ((a)-(__mul24((b), (a)/(b)))) #define b_copy() \ do { \ dim3 dimBlock( (M >= MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((M/WARP_SIZE)+(M % WARP_SIZE != 0))), 1 ); \ dim3 dimGrid( (M - 1)/dimBlock.x + 1, N ); \ hipLaunchKernelGGL(( b_copy_kernel), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , M, N, b, ldb, d_x, M); \ magma_device_sync(); \ } while(0) #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void diag_dtrtri_kernel_upper(char diag, const double *A, double *d_dinvA, int lda) { int i, j; double Ystx = 0; double *y = NULL; int switcher = 0; // Thread index int tx = threadIdx.x; // Block index int bx = blockIdx.x; const double *Aoff = A + bx*lda*BLOCK_SIZE + bx*BLOCK_SIZE; int NumBLperNB = NB/BLOCK_SIZE; d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE); __shared__ double Bs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ double workspace[BLOCK_SIZE]; // workspace used to store the current working column // load A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) Bs[i*BLOCK_SIZE+tx] = ((double)(tx <= i))*(*(Aoff+i*lda+tx)); // read in the whole square block of my A and zero out the non data triangular // Synchronize to make sure the matrices are loaded __syncthreads(); switcher = (diag=='u' || diag=='U'); int diagsw = (Bs[tx*BLOCK_SIZE+tx] == 0); Bs[tx*BLOCK_SIZE+tx] = switcher + !switcher*(1/(diagsw + (!diagsw)*Bs[tx*BLOCK_SIZE+tx])); // solve the diagonals /* the upper case */ for( i=0; i < BLOCK_SIZE; i++ ) { Ystx = 0; switcher = (double)(tx < i); //dtrmv workspace[tx] = *(Bs+i*BLOCK_SIZE+tx); y = Bs+i*BLOCK_SIZE; #pragma unroll //for( j=tx; j < i; j++ ) for( j=0; j < i; j++ ) Ystx += switcher*(*(Bs+j*BLOCK_SIZE+tx)*workspace[j]); //sscal switcher = (tx != i); // if (tx != i) y[tx]=switcher*Ystx*(-Bs[i*BLOCK_SIZE+i]); y[tx] = switcher*Ystx*(-Bs[i*BLOCK_SIZE+i])+!switcher*y[tx]; __syncthreads(); } // write back A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx]; } __global__ void diag_dtrtri_kernel_lower(char diag, const double *A, double *d_dinvA, int lda) { int i, j; double Ystx=0; double *Bw=NULL, *x=NULL, *y=NULL; int switcher=0; // Thread index int tx = threadIdx.x; int txw; // Block index int bx = blockIdx.x; const double *Aoff = A+bx*lda*BLOCK_SIZE+bx*BLOCK_SIZE; int NumBLperNB = NB/BLOCK_SIZE; d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE); __shared__ double Bs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ double workspace[BLOCK_SIZE]; // workspace used to store the current working column // load A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) Bs[i*BLOCK_SIZE+tx] = ((double)(tx >= i))*(*(Aoff+i*lda+tx)); // read in the whole square block of my A and zero out the non data triangular // not the upper or lower diagonal // Synchronize to make sure the matrices are loaded __syncthreads(); switcher = (diag=='u' || diag=='U'); int diagsw = (Bs[tx*BLOCK_SIZE+tx] == 0); Bs[tx*BLOCK_SIZE+tx] = switcher + !switcher*(1/(diagsw + (!diagsw)*Bs[tx*BLOCK_SIZE+tx])); // solve the diagonals /* * the lower case */ switcher = !(tx < BLOCK_SIZE-1); Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx] = 
(double)switcher*Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx]; // zero out the last column, except the diagonal element for( i=BLOCK_SIZE-2; i >= 0; i-- ) { Ystx = 0; switcher = (tx > i); //dtrmv Bw = Bs+(i+1)*BLOCK_SIZE+i+1; workspace[tx] = *(Bs+i*BLOCK_SIZE+tx); x = workspace+i+1; y = Bs+i*BLOCK_SIZE; txw = (tx-i-1); #pragma unroll for( j=0; j < BLOCK_SIZE-i-1; j++ ) Ystx += (double)switcher*(*(Bw+j*BLOCK_SIZE+txw)*x[j]); //sscal switcher = (tx != i); y[tx] = (double)switcher*Ystx*(-Bs[i*BLOCK_SIZE+i])+(double)(!switcher)*y[tx]; __syncthreads(); } // write back A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx]; } /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double * __restrict__ b, double * __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } __device__ void dgemm_kernel_16( double *A, int lda, double *B, int ldb, double *C, int ldc, double alpha, int blk, int inx, int iny, double *c) { const double *Blast = B + blk; __shared__ double bs[16][17]; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = alpha*c[i]; C += ldc; } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_16_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; //const int page = (blockIdx.y)%(npages); const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12 // A=A12, B=inv(A22), C=A12(d_dinvA) const double *A; double *B, *C; int ldb = NB; int ldc = NB; 
d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); 
daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_16_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; //const int page = (blockIdx.y)%(npages); const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; int PagesPerNB = NB/(blk*2); d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_16_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; int PagesPerNB = NB/(blk*2); d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + 
iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_32_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A21 // A=A12, B=inv(A22), C=A12(d_dinvA) const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; 
daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_32_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_32_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx 
][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_32_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_64_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = 
threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12(d_dinvA) // A=A12, B=inv(A22), C=A12 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_64_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = 
A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_64_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_64_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; 
daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_above64_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12(d_dinvA) // A=A12, B=inv(A22), C=A12 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_above64_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + 
page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_above64_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = d_dinvA + blk*NB; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * part 3, copy data into position */ __global__ void triple_dgemm_update_above64_part3_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = 
threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *C_temp, *C_real; int ldc = NB; C_temp = d_dinvA + NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2) + blk; C_real = d_dinvA + NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + blk*NB + (qmod(page, PagesPerNB))*(blk*2); C_temp += ibx + id + __mul24( iby, ldc ); C_real += ibx + id + __mul24( iby, ldc ); for( int i = 0; i < 16; i++ ) { C_real[0] = C_temp[0]; C_temp[0] = 0; C_temp += ldc; C_real += ldc; } } } /* * part 3: copy data back to position */ __global__ void triple_dgemm_update_above64_part3_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; int PagesPerNB = NB/(blk*2); //--------------------------part three---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *C_temp, *C_real; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); C_real = d_dinvA + blk; C_temp = d_dinvA + blk*NB; C_temp += ibx + id + __mul24( iby, ldc ); C_real += ibx + id + __mul24( iby, ldc ); for( int i = 0; i < 16; i++ ) { C_real[0] = C_temp[0]; C_temp[0] = 0; C_real += ldc; C_temp += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_above64_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = d_dinvA + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( 
a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } __global__ void b_copy_kernel (int M, int N, double *b, int ldb, double *d_x, int ldx) { int by = blockIdx.y; int gx = blockIdx.x*blockDim.x + threadIdx.x; if (gx < M) b[by*ldb+gx] = d_x[by*ldx+gx]; } extern "C" void diag_dtrtri (magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda) { int nblocks = M/BLOCK_SIZE + (M % BLOCK_SIZE != 0); if (uplo == 'l' || uplo == 'L') { // solve the diagonal blocks hipLaunchKernelGGL(( diag_dtrtri_kernel_lower), dim3(nblocks), dim3(BLOCK_SIZE), 0, magma_stream , diag, A, d_dinvA, lda); // update the inverse up to the size of BLOCK_SIZE for( int i=BLOCK_SIZE; i < NB; i*=2 ) { int npages = M/(i*2)+(M%(i*2)!=0); dim3 dimBlock((i <= 32)?(i/4):16, 4); dim3 dimGrid(i/(dimBlock.x*dimBlock.y), npages*(i/16)); // emulated 3D grid, see 3d_grid.txt switch (i) { case 16: hipLaunchKernelGGL(( triple_dgemm_update_16_part1_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_16_part2_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; case 32: hipLaunchKernelGGL(( triple_dgemm_update_32_part1_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_32_part2_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; case 64: hipLaunchKernelGGL(( triple_dgemm_update_64_part1_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_64_part2_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; default: hipLaunchKernelGGL(( triple_dgemm_update_above64_part1_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_above64_part2_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_above64_part3_L), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; } if (i*2 >= M) break; } } else { hipLaunchKernelGGL(( diag_dtrtri_kernel_upper), dim3(nblocks), dim3(BLOCK_SIZE), 0, magma_stream , diag, A, d_dinvA, lda); // update the inverse up to the size of BLOCK_SIZE for( int i=BLOCK_SIZE; i < NB; i*=2 ) { int npages = M/(i*2)+(M%(i*2)!=0); dim3 dimBlock((i <= 32)?(i/4):16, 4); dim3 dimGrid(i/(dimBlock.x*dimBlock.y), npages*(i/16)); // emulated 3D grid, see 3d_grid.txt switch (i) { case 16: hipLaunchKernelGGL(( triple_dgemm_update_16_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; case 32: hipLaunchKernelGGL(( triple_dgemm_update_32_part1_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_32_part2_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; case 64: hipLaunchKernelGGL(( triple_dgemm_update_64_part1_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); hipLaunchKernelGGL(( triple_dgemm_update_64_part2_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); break; default: hipLaunchKernelGGL(( triple_dgemm_update_above64_part1_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages); 
                    hipLaunchKernelGGL(( triple_dgemm_update_above64_part2_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages);
                    hipLaunchKernelGGL(( triple_dgemm_update_above64_part3_R), dim3(dimGrid), dim3(dimBlock), 0, magma_stream , A, d_dinvA, i, lda, npages);
                    break;
            }
            if (i*2 >= M) break;
        }
    }
}

/*
 * magmablas_dtrsm
 */
extern "C"
void magmablas_dtrsm(
    char side, char uplo, char tran, char diag,
    magma_int_t M, magma_int_t N,
    double alpha,
    const double* A, magma_int_t lda,
    double* b, magma_int_t ldb )
{
/*  -- MAGMA (version 1.4.1) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       December 2013

    Purpose
    =======
    dtrsm solves one of the matrix equations on gpu

        op( A )*x = alpha*b,   or   x*op( A ) = alpha*b,

    where alpha is a scalar, x and b are m by n matrices, A is a unit, or
    non-unit, upper or lower triangular matrix and op( A ) is one of

        op( A ) = A   or   op( A ) = A^T.

    The matrix X is overwritten on B.

    When M or N is not a multiple of blocking size, which is 32 for now,
    hipblasDtrsm will be called instead. There soon will not be this
    limitation both for arbitrary problem size and blocking size.

    Arguments
    ==========
    side    CHARACTER*1.
            On entry, side specifies whether op( A ) appears on the left
            or right of X as follows:
                side = 'L' or 'l'   op( A )*X = alpha*B.
                side = 'R' or 'r'   X*op( A ) = alpha*B.
            Unchanged on exit.

    uplo    CHARACTER*1.
            On entry, uplo specifies whether the matrix A is an upper or
            lower triangular matrix as follows:
                uplo = 'U' or 'u'   A is an upper triangular matrix.
                uplo = 'L' or 'l'   A is a lower triangular matrix.
            Unchanged on exit.

    tran    CHARACTER*1.
            On entry, tran specifies the form of op( A ) to be used in
            the matrix multiplication as follows:
                tran = 'N' or 'n'   op( A ) = A.
                tran = 'T' or 't'   op( A ) = A^T.
                tran = 'C' or 'c'   op( A ) = A^T.
            Unchanged on exit.

    diag    CHARACTER*1.
            On entry, diag specifies whether or not A is unit triangular
            as follows:
                diag = 'U' or 'u'   A is assumed to be unit triangular.
                diag = 'N' or 'n'   A is not assumed to be unit triangular.
            Unchanged on exit.

    m       INTEGER.
            On entry, m specifies the number of rows of B. m must be at
            least zero.
            Unchanged on exit.

    n       INTEGER.
            On entry, n specifies the number of columns of B. n must be
            at least zero.
            Unchanged on exit.

    alpha   REAL.
            On entry, alpha specifies the scalar alpha. When alpha is
            zero then A is not referenced and B need not be set before
            entry.
            Unchanged on exit.

    A       REAL array of DIMENSION ( lda, k ), where k is m when
            side = 'L' or 'l' and is n when side = 'R' or 'r'.
            Before entry with uplo = 'U' or 'u', the leading k by k
            upper triangular part of the array A must contain the upper
            triangular matrix and the strictly lower triangular part of
            A is not referenced.
            Before entry with uplo = 'L' or 'l', the leading k by k
            lower triangular part of the array A must contain the lower
            triangular matrix and the strictly upper triangular part of
            A is not referenced.
            Note that when diag = 'U' or 'u', the diagonal elements of
            A are not referenced either, but are assumed to be unity.
            Unchanged on exit.

    lda     INTEGER.
            On entry, lda specifies the first dimension of A as declared
            in the calling (sub) program. When side = 'L' or 'l' then
            lda must be at least max( 1, m ), when side = 'R' or 'r'
            then lda must be at least max( 1, n ).
            Unchanged on exit.

    b       REAL array of DIMENSION ( ldb, n ).
            Before entry, the leading m by n part of the array B must
            contain the right-hand side matrix B, and on exit is
            overwritten by the solution matrix X.

    ldb     INTEGER.
            On entry, ldb specifies the first dimension of B as declared
            in the calling (sub) program. ldb must be at least
            max( 1, m ).
            Unchanged on exit.

    Level 3 Blas routine.
    =====================================================================    */

    int i;
    double *d_dinvA, *d_x;

    /* quick return on wrong size */
    if (M <= 0 || N <= 0)
        return;

    if (side == 'l' || side == 'L') {
        // side=L
        /* invert the diagonals
         * Allocate device memory for the inverted diagonal blocks, size=m*NB
         */
        magma_dmalloc( &d_dinvA, NB*((M/NB)+(M % NB != 0))*NB );
        magma_dmalloc( &d_x,     N*M );
        hipMemset(d_x,     0, N*M*sizeof(double));
        hipMemset(d_dinvA, 0, NB*((M/NB)+(M % NB != 0))*NB*sizeof(double));
        diag_dtrtri (M, uplo, diag, A, d_dinvA, lda);

        if (tran == 'N' || tran == 'n') {
            /* the non-transpose case */
            if (uplo == 'L' || uplo == 'l') {
                /* the lower case */
                /* handle the first block separately with alpha */
                int MM = min (NB, M);
                hipblasDgemm('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);

                if (NB >= M) {
                    b_copy();
                    magma_free( d_dinvA );
                    magma_free( d_x );
                    return;
                }

                hipblasDgemm('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb);

                /* the rest blocks */
                for( i=NB; i < M; i += NB ) {
                    MM = min (M-i, NB);
                    hipblasDgemm('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);

                    if (i+NB >= M)
                        break;

                    hipblasDgemm('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb);
                }
            }
            else {
                /* the upper case */
                /* handle the first block separately with alpha */
                int MM = (M % NB == 0) ? NB : (M % NB);
                i = M-MM;
                hipblasDgemm('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);

                if (i-NB < 0) {
                    b_copy();
                    magma_free( d_dinvA );
                    magma_free( d_x );
                    return;
                }

                hipblasDgemm('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb);

                /* the rest blocks */
                for( i=M-MM-NB; i >= 0; i -= NB ) {
                    hipblasDgemm('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M);

                    if (i-NB < 0)
                        break;

                    hipblasDgemm('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb);
                }
            }
        }
        else {
            /* the transpose case */
            if (uplo == 'L' || uplo == 'l') {
                /* the lower case */
                /* handle the first block separately with alpha */
                int MM = (M % NB == 0) ? NB : (M % NB);
                i = M-MM;
                hipblasDgemm('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);

                if (i-NB < 0) {
                    b_copy();
                    magma_free( d_dinvA );
                    magma_free( d_x );
                    return;
                }

                hipblasDgemm('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb);

                /* the rest blocks */
                for( i=M-MM-NB; i >= 0; i -= NB ) {
                    hipblasDgemm('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);

                    if (i-NB < 0)
                        break;

                    hipblasDgemm('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb);
                }
            }
            else {
                /* the upper case */
                /* handle the first block separately with alpha */
                int MM = min (NB, M);
                hipblasDgemm('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M);

                if (NB >= M) {
                    b_copy();
                    magma_free( d_dinvA );
                    magma_free( d_x );
                    return;
                }

                hipblasDgemm('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb);

                /* the rest blocks */
                for( i=NB; i < M; i += NB ) {
                    MM = min (M-i, NB);
                    hipblasDgemm('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M);

                    if (i+NB >= M)
                        break;

                    hipblasDgemm('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb);
                }
            }
        }
    }
    else {
        // side=R
        /* invert the diagonals
         * Allocate device memory for the inverted diagonal blocks, size=N*BLOCK_SIZE
         */
        magma_dmalloc( &d_dinvA, NB*((N/NB) + (N % NB != 0))*NB );
        magma_dmalloc( &d_x,     N*M );
        hipMemset(d_x,     0, N*M*sizeof(double));
        hipMemset(d_dinvA, 0, NB*((N/NB)+(N % NB != 0))*NB*sizeof(double));
        diag_dtrtri (N, uplo, diag, A, d_dinvA, lda);

        if (tran == 'N' || tran == 'n') {
            /* the non-transpose case */
            if (uplo == 'L' || uplo == 'l') {
                /* the lower case */
                /* handle the first block separately with alpha */
                int NN = (N % NB == 0) ? NB : (N % NB);
                i = N-NN;
                hipblasDgemm('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);

                if (i-NB < 0) {
                    b_copy();
                    magma_free( d_x );
                    magma_free( d_dinvA );
                    return;
                }

                hipblasDgemm('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb);

                /* the rest blocks */
                for( i=N-NN-NB; i >= 0; i -= NB ) {
                    hipblasDgemm('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);

                    if (i-NB < 0)
                        break;

                    hipblasDgemm('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb);
                }
            }
            else {
                /* the upper case */
                /* handle the first block separately with alpha */
                int NN = min(NB, N);
                hipblasDgemm('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);

                if (NB >= N) {
                    b_copy();
                    magma_free( d_x );
                    magma_free( d_dinvA );
                    return;
                }

                hipblasDgemm('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb);

                /* the rest blocks */
                for( i=NB; i < N; i += NB ) {
                    NN = min(NB, N-i);
                    hipblasDgemm('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);

                    if (i+NB >= N)
                        break;

                    hipblasDgemm('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb);
                }
            }
        }
        else {
            /* the transpose case */
            if (uplo == 'L' || uplo == 'l') {
                /* the lower case */
                /* handle the first block separately with alpha */
                int NN = min(NB, N);
                hipblasDgemm('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M);

                if (NB >= N) {
                    b_copy();
                    magma_free( d_x );
                    magma_free( d_dinvA );
                    return;
                }

                hipblasDgemm('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb);

                /* the rest blocks */
                for( i=NB; i < N; i += NB ) {
                    NN = min(NB, N-i);
                    hipblasDgemm('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M);

                    if (i+NB >= N)
                        break;

                    hipblasDgemm('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb);
                }
            }
            else {
                /* the upper case */
                /* handle the first block separately with alpha */
                int NN = (N % NB == 0) ? NB : (N % NB);
                i = N-NN;
                hipblasDgemm('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);

                if (i-NB < 0) {
                    b_copy();
                    magma_free( d_x );
                    magma_free( d_dinvA );
                    return;
                }

                hipblasDgemm('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb);

                /* the rest blocks */
                for( i=N-NN-NB; i >= 0; i -= NB ) {
                    hipblasDgemm('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M);

                    if (i-NB < 0)
                        break;

                    hipblasDgemm('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb);
                }
            }
        }
    }

    b_copy();
    magma_free( d_dinvA );
    magma_free( d_x );
}
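/*
 * Illustrative usage sketch (not part of the original MAGMA source): a minimal
 * example of how a caller might invoke magmablas_dtrsm above to solve
 * A * X = alpha * B for X, with A lower triangular and a non-unit diagonal.
 * The names example_dtrsm_lower, m, n, dA, ldda, dB, lddb are hypothetical;
 * dA and dB are assumed to already hold A (m x m) and B (m x n) on the device
 * in column-major order. The solution X overwrites dB on return.
 */
extern "C"
void example_dtrsm_lower( magma_int_t m, magma_int_t n,
                          const double *dA, magma_int_t ldda,
                          double *dB, magma_int_t lddb )
{
    double alpha = 1.0;
    // side='L', uplo='L', tran='N', diag='N': solve op(A)*X = alpha*B from the left
    magmablas_dtrsm( 'L', 'L', 'N', 'N', m, n, alpha, dA, ldda, dB, lddb );
}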
26305ba4d271e76f471974e439953b5650c55f53.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @precisions normal d -> s @author Peng Du */ #include "common_magma.h" #define qmod(a, b) ((a)-(__mul24((b), (a)/(b)))) #define b_copy() \ do { \ dim3 dimBlock( (M >= MAX_THREAD_PER_BLOCK) ? MAX_THREAD_PER_BLOCK : (WARP_SIZE*((M/WARP_SIZE)+(M % WARP_SIZE != 0))), 1 ); \ dim3 dimGrid( (M - 1)/dimBlock.x + 1, N ); \ b_copy_kernel<<< dimGrid, dimBlock, 0, magma_stream >>>(M, N, b, ldb, d_x, M); \ magma_device_sync(); \ } while(0) #define MAX_THREAD_PER_BLOCK 512 #define WARP_SIZE 32 #define BLOCK_SIZE 16 // inner blocking size, <=32 #define NB 128 // outer blocking size, >BLOCK_SIZE __global__ void diag_dtrtri_kernel_upper(char diag, const double *A, double *d_dinvA, int lda) { int i, j; double Ystx = 0; double *y = NULL; int switcher = 0; // Thread index int tx = threadIdx.x; // Block index int bx = blockIdx.x; const double *Aoff = A + bx*lda*BLOCK_SIZE + bx*BLOCK_SIZE; int NumBLperNB = NB/BLOCK_SIZE; d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE); __shared__ double Bs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ double workspace[BLOCK_SIZE]; // workspace used to store the current working column // load A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) Bs[i*BLOCK_SIZE+tx] = ((double)(tx <= i))*(*(Aoff+i*lda+tx)); // read in the whole square block of my A and zero out the non data triangular // Synchronize to make sure the matrices are loaded __syncthreads(); switcher = (diag=='u' || diag=='U'); int diagsw = (Bs[tx*BLOCK_SIZE+tx] == 0); Bs[tx*BLOCK_SIZE+tx] = switcher + !switcher*(1/(diagsw + (!diagsw)*Bs[tx*BLOCK_SIZE+tx])); // solve the diagonals /* the upper case */ for( i=0; i < BLOCK_SIZE; i++ ) { Ystx = 0; switcher = (double)(tx < i); //dtrmv workspace[tx] = *(Bs+i*BLOCK_SIZE+tx); y = Bs+i*BLOCK_SIZE; #pragma unroll //for( j=tx; j < i; j++ ) for( j=0; j < i; j++ ) Ystx += switcher*(*(Bs+j*BLOCK_SIZE+tx)*workspace[j]); //sscal switcher = (tx != i); // if (tx != i) y[tx]=switcher*Ystx*(-Bs[i*BLOCK_SIZE+i]); y[tx] = switcher*Ystx*(-Bs[i*BLOCK_SIZE+i])+!switcher*y[tx]; __syncthreads(); } // write back A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx]; } __global__ void diag_dtrtri_kernel_lower(char diag, const double *A, double *d_dinvA, int lda) { int i, j; double Ystx=0; double *Bw=NULL, *x=NULL, *y=NULL; int switcher=0; // Thread index int tx = threadIdx.x; int txw; // Block index int bx = blockIdx.x; const double *Aoff = A+bx*lda*BLOCK_SIZE+bx*BLOCK_SIZE; int NumBLperNB = NB/BLOCK_SIZE; d_dinvA += bx/NumBLperNB*NB*NB + (bx % NumBLperNB)*(NB*BLOCK_SIZE + BLOCK_SIZE); __shared__ double Bs[BLOCK_SIZE*BLOCK_SIZE]; __shared__ double workspace[BLOCK_SIZE]; // workspace used to store the current working column // load A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) Bs[i*BLOCK_SIZE+tx] = ((double)(tx >= i))*(*(Aoff+i*lda+tx)); // read in the whole square block of my A and zero out the non data triangular // not the upper or lower diagonal // Synchronize to make sure the matrices are loaded __syncthreads(); switcher = (diag=='u' || diag=='U'); int diagsw = (Bs[tx*BLOCK_SIZE+tx] == 0); Bs[tx*BLOCK_SIZE+tx] = switcher + !switcher*(1/(diagsw + (!diagsw)*Bs[tx*BLOCK_SIZE+tx])); // solve the diagonals /* * the lower case */ switcher = !(tx < BLOCK_SIZE-1); Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx] = (double)switcher*Bs[(BLOCK_SIZE-1)*BLOCK_SIZE+tx]; // zero out the last column, except the diagonal element for( 
i=BLOCK_SIZE-2; i >= 0; i-- ) { Ystx = 0; switcher = (tx > i); //dtrmv Bw = Bs+(i+1)*BLOCK_SIZE+i+1; workspace[tx] = *(Bs+i*BLOCK_SIZE+tx); x = workspace+i+1; y = Bs+i*BLOCK_SIZE; txw = (tx-i-1); #pragma unroll for( j=0; j < BLOCK_SIZE-i-1; j++ ) Ystx += (double)switcher*(*(Bw+j*BLOCK_SIZE+txw)*x[j]); //sscal switcher = (tx != i); y[tx] = (double)switcher*Ystx*(-Bs[i*BLOCK_SIZE+i])+(double)(!switcher)*y[tx]; __syncthreads(); } // write back A #pragma unroll for( i=0; i < BLOCK_SIZE; i++ ) *(d_dinvA+i*NB+tx) = Bs[i*BLOCK_SIZE+tx]; } /* * daxpy computes c += alpha*b, where b and c are 16-element vectors. */ static __device__ void daxpy( double alpha, const double * __restrict__ b, double * __restrict__ c ) { c[0] += alpha * b[0]; c[1] += alpha * b[1]; c[2] += alpha * b[2]; c[3] += alpha * b[3]; c[4] += alpha * b[4]; c[5] += alpha * b[5]; c[6] += alpha * b[6]; c[7] += alpha * b[7]; c[8] += alpha * b[8]; c[9] += alpha * b[9]; c[10] += alpha * b[10]; c[11] += alpha * b[11]; c[12] += alpha * b[12]; c[13] += alpha * b[13]; c[14] += alpha * b[14]; c[15] += alpha * b[15]; } __device__ void dgemm_kernel_16( double *A, int lda, double *B, int ldb, double *C, int ldc, double alpha, int blk, int inx, int iny, double *c) { const double *Blast = B + blk; __shared__ double bs[16][17]; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = alpha*c[i]; C += ldc; } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_16_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; //const int page = (blockIdx.y)%(npages); const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12 // A=A12, B=inv(A22), C=A12(d_dinvA) const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = 
Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( 
int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_16_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; //const int page = (blockIdx.y)%(npages); const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; int PagesPerNB = NB/(blk*2); d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_16_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; int PagesPerNB = NB/(blk*2); d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+ 4][iny ] = B[ 4+ 0*ldb]; bs[inx+ 4][iny+ 4] = B[ 4+ 4*ldb]; bs[inx+ 4][iny+ 8] = B[ 4+ 8*ldb]; bs[inx+ 4][iny+12] = B[ 4+12*ldb]; bs[inx+ 8][iny ] = B[ 8+ 0*ldb]; bs[inx+ 8][iny+ 4] = B[ 8+ 4*ldb]; bs[inx+ 8][iny+ 8] = B[ 8+ 8*ldb]; bs[inx+ 8][iny+12] = B[ 8+12*ldb]; bs[inx+12][iny ] = B[12+ 0*ldb]; bs[inx+12][iny+ 4] = B[12+ 4*ldb]; bs[inx+12][iny+ 8] = B[12+ 8*ldb]; bs[inx+12][iny+12] = B[12+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_32_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A21 // A=A12, B=inv(A22), C=A12(d_dinvA) const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c 
); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_32_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_32_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; 
bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_32_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x * (blockDim.x*blockDim.y); const int iby = bIdy * 16; const int id = inx + iny*blockDim.x; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx ][iny ] = B[ 0*ldb]; bs[inx ][iny+ 4] = B[ 4*ldb]; bs[inx ][iny+ 8] = B[ 8*ldb]; bs[inx ][iny+12] = B[ 12*ldb]; bs[inx+8][iny ] = B[8+ 0*ldb]; bs[inx+8][iny+ 4] = B[8+ 4*ldb]; bs[inx+8][iny+ 8] = B[8+ 8*ldb]; bs[inx+8][iny+12] = B[8+12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_64_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + 
iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12(d_dinvA) // A=A12, B=inv(A22), C=A12 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_64_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], 
&bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_64_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_64_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 
4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_above64_part1_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A12*inv(A22) -> A12(d_dinvA) // A=A12, B=inv(A22), C=A12 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + blk*lda + page*blk*2; B = d_dinvA + blk*NB + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_above64_part1_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part one---------------------------// { // A21*inv(A11) -> A21 // A=A21, B=inv(A11), C=A21 const double *A; double *B, *C; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = Ain + page*lda*blk*2 + page*blk*2 + blk; B = d_dinvA; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( 
iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = c[i]; C += ldc; } } } /* * B21 = -inv(A11)*A12*inv(A22) */ __global__ void triple_dgemm_update_above64_part2_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 const double *A; double *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA; B = d_dinvA + blk*NB; C = d_dinvA + blk; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) { C[0] = (-1)*c[i]; C += ldc; } } } /* * part 3, copy data into position */ __global__ void triple_dgemm_update_above64_part3_R (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; int 
PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A11)*A12 -> A12 // A=inv(A11), B=A12, C=A12 double *C_temp, *C_real; int ldc = NB; C_temp = d_dinvA + NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2) + blk; C_real = d_dinvA + NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + blk*NB + (qmod(page, PagesPerNB))*(blk*2); C_temp += ibx + id + __mul24( iby, ldc ); C_real += ibx + id + __mul24( iby, ldc ); for( int i = 0; i < 16; i++ ) { C_real[0] = C_temp[0]; C_temp[0] = 0; C_temp += ldc; C_real += ldc; } } } /* * part 3: copy data back to position */ __global__ void triple_dgemm_update_above64_part3_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; int PagesPerNB = NB/(blk*2); //--------------------------part three---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *C_temp, *C_real; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); C_real = d_dinvA + blk; C_temp = d_dinvA + blk*NB; C_temp += ibx + id + __mul24( iby, ldc ); C_real += ibx + id + __mul24( iby, ldc ); for( int i = 0; i < 16; i++ ) { C_real[0] = C_temp[0]; C_temp[0] = 0; C_real += ldc; C_temp += ldc; } } __syncthreads(); } /* * B21 = -inv(A22)*A21*inv(A11) */ __global__ void triple_dgemm_update_above64_part2_L (const double *Ain, double *d_dinvA, int blk, int lda, int npages) { const int bIdy = blockIdx.y/npages; const int page = qmod(blockIdx.y, npages); const int inx = threadIdx.x; const int iny = threadIdx.y; const int ibx = blockIdx.x*64; const int iby = bIdy*16; const int id = inx + iny*16; __shared__ double bs[16][17]; int PagesPerNB = NB/(blk*2); //--------------------------part two---------------------------// { // -inv(A22)*A21 -> A21 // A=inv(A22), B=A21, C=A21 double *A, *B, *C; int lda = NB; int ldb = NB; int ldc = NB; d_dinvA += NB*NB*(page/PagesPerNB) + (qmod(page, PagesPerNB))*(blk*2)*NB + (qmod(page, PagesPerNB))*(blk*2); A = d_dinvA + blk*NB + blk; B = d_dinvA + blk; C = d_dinvA + blk*NB; A += ibx + id; B += inx + __mul24( iby + iny, ldb ); C += ibx + id + __mul24( iby, ldc ); const double *Blast = B + blk; double c[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; do { double a[4] = { A[0*lda], A[1*lda], A[2*lda], A[3*lda] }; bs[inx][iny ] = B[ 0*ldb]; bs[inx][iny+ 4] = B[ 4*ldb]; bs[inx][iny+ 8] = B[ 8*ldb]; bs[inx][iny+12] = B[12*ldb]; __syncthreads(); A += 4*lda; daxpy( a[0], &bs[ 0][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 1][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 2][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 3][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 4][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 5][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[ 6][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[ 7][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[ 8][0], c ); a[0] = A[0*lda]; daxpy( a[1], &bs[ 9][0], c ); a[1] = A[1*lda]; daxpy( a[2], &bs[10][0], c ); a[2] = A[2*lda]; daxpy( a[3], &bs[11][0], c ); a[3] = A[3*lda]; A += 4*lda; daxpy( a[0], &bs[12][0], c ); daxpy( a[1], &bs[13][0], c ); daxpy( a[2], &bs[14][0], c ); daxpy( a[3], &bs[15][0], c ); B += 16; __syncthreads(); } while( B < Blast ); for( int i = 0; i < 16; i++ ) 
{ C[0] = (-1)*c[i]; C += ldc; } } } __global__ void b_copy_kernel (int M, int N, double *b, int ldb, double *d_x, int ldx) { int by = blockIdx.y; int gx = blockIdx.x*blockDim.x + threadIdx.x; if (gx < M) b[by*ldb+gx] = d_x[by*ldx+gx]; } extern "C" void diag_dtrtri (magma_int_t M, char uplo, char diag, const double *A, double *d_dinvA, magma_int_t lda) { int nblocks = M/BLOCK_SIZE + (M % BLOCK_SIZE != 0); if (uplo == 'l' || uplo == 'L') { // solve the diagonal blocks diag_dtrtri_kernel_lower<<< nblocks, BLOCK_SIZE, 0, magma_stream >>>(diag, A, d_dinvA, lda); // update the inverse up to the size of BLOCK_SIZE for( int i=BLOCK_SIZE; i < NB; i*=2 ) { int npages = M/(i*2)+(M%(i*2)!=0); dim3 dimBlock((i <= 32)?(i/4):16, 4); dim3 dimGrid(i/(dimBlock.x*dimBlock.y), npages*(i/16)); // emulated 3D grid, see 3d_grid.txt switch (i) { case 16: triple_dgemm_update_16_part1_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_16_part2_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; case 32: triple_dgemm_update_32_part1_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_32_part2_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; case 64: triple_dgemm_update_64_part1_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_64_part2_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; default: triple_dgemm_update_above64_part1_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_above64_part2_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_above64_part3_L<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; } if (i*2 >= M) break; } } else { diag_dtrtri_kernel_upper<<< nblocks, BLOCK_SIZE, 0, magma_stream >>>(diag, A, d_dinvA, lda); // update the inverse up to the size of BLOCK_SIZE for( int i=BLOCK_SIZE; i < NB; i*=2 ) { int npages = M/(i*2)+(M%(i*2)!=0); dim3 dimBlock((i <= 32)?(i/4):16, 4); dim3 dimGrid(i/(dimBlock.x*dimBlock.y), npages*(i/16)); // emulated 3D grid, see 3d_grid.txt switch (i) { case 16: triple_dgemm_update_16_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; case 32: triple_dgemm_update_32_part1_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_32_part2_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; case 64: triple_dgemm_update_64_part1_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_64_part2_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; default: triple_dgemm_update_above64_part1_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_above64_part2_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); triple_dgemm_update_above64_part3_R<<< dimGrid, dimBlock, 0, magma_stream >>>(A, d_dinvA, i, lda, npages); break; } if (i*2 >= M) break; } } } /* * magmablas_dtrsm */ extern "C" void magmablas_dtrsm( char side, char uplo, char tran, char diag, magma_int_t M, magma_int_t N, double alpha, const double* A, magma_int_t lda, double* b, magma_int_t ldb ) { /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. 
of Colorado, Denver December 2013 Purpose ======= dtrsm solves one of the matrix equations on gpu op( A )*x = alpha*b, or x*op( A ) = alpha*b, where alpha is a scalar, x and b are m by n matrices, A is a unit, or non-unit, upper or lower triangular matrix and op( A ) is one of op( A ) = A or op( A ) = A^T. The matrix X is overwritten on B. When M or N is not a multiple of blocking size, which is 32 for now, cublasDtrsm will be called instead. There soon will not be this limitation both for arbitrary problem size and blocking size. Arguments ========== side CHARACTER*1. On entry, side specifies whether op( A ) appears on the left or right of X as follows: side = 'L' or 'l' op( A )*X = alpha*B. side = 'R' or 'r' X*op( A ) = alpha*B. Unchanged on exit. uplo CHARACTER*1. On entry, uplo specifies whether the matrix A is an upper or lower triangular matrix as follows: uplo = 'U' or 'u' A is an upper triangular matrix. uplo = 'L' or 'l' A is a lower triangular matrix. Unchanged on exit. tran CHARACTER*1. On entry, tran specifies the form of op( A ) to be used in the matrix multiplication as follows: tran = 'N' or 'n' op( A ) = A. tran = 'T' or 't' op( A ) = A^T. tran = 'C' or 'c' op( A ) = A^T. Unchanged on exit. diag CHARACTER*1. On entry, diag specifies whether or not A is unit triangular as follows: diag = 'U' or 'u' A is assumed to be unit triangular. diag = 'N' or 'n' A is not assumed to be unit triangular. Unchanged on exit. m INTEGER. On entry, m specifies the number of rows of B. m must be at least zero. Unchanged on exit. n INTEGER. On entry, n specifies the number of columns of B. n must be at least zero. Unchanged on exit. alpha REAL. On entry, alpha specifies the scalar alpha. When alpha is zero then A is not referenced and B need not be set before entry. Unchanged on exit. A REAL array of DIMENSION ( lda, k ), where k is m when side = 'L' or 'l' and is n when side = 'R' or 'r'. Before entry with uplo = 'U' or 'u', the leading k by k upper triangular part of the array A must contain the upper triangular matrix and the strictly lower triangular part of A is not referenced. Before entry with uplo = 'L' or 'l', the leading k by k lower triangular part of the array A must contain the lower triangular matrix and the strictly upper triangular part of A is not referenced. Note that when diag = 'U' or 'u', the diagonal elements of A are not referenced either, but are assumed to be unity. Unchanged on exit. lda INTEGER. On entry, lda specifies the first dimension of A as declared in the calling (sub) program. When side = 'L' or 'l' then lda must be at least max( 1, m ), when side = 'R' or 'r' then lda must be at least max( 1, n ). Unchanged on exit. b REAL array of DIMENSION ( ldb, n ). Before entry, the leading m by n part of the array B must contain the right-hand side matrix B, and on exit is overwritten by the solution matrix X. ldb INTEGER. On entry, ldb specifies the first dimension of B as declared in the calling (sub) program. ldb must be at least max( 1, m ). Unchanged on exit. Level 3 Blas routine. 
===================================================================== */ int i; double *d_dinvA, *d_x; /* quick return on wrong size */ if (M <= 0 || N <= 0) return; if (side == 'l' || side == 'L') { // side=L /* invert the diagonals * Allocate device memory for the inverted diagonal blocks, size=m*NB */ magma_dmalloc( &d_dinvA, NB*((M/NB)+(M % NB != 0))*NB ); magma_dmalloc( &d_x, N*M ); cudaMemset(d_x, 0, N*M*sizeof(double)); cudaMemset(d_dinvA, 0, NB*((M/NB)+(M % NB != 0))*NB*sizeof(double)); diag_dtrtri (M, uplo, diag, A, d_dinvA, lda); if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); cublasDgemm('N', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); magma_free( d_dinvA ); magma_free( d_x ); return; } cublasDgemm('N', 'N', M-NB, N, NB, -1.0, A+NB, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for( i=NB; i < M; i += NB ) { MM = min (M-i, NB); cublasDgemm('N', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; cublasDgemm('N', 'N', M-i-NB, N, NB, -1.0, A+i*lda+i+NB, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = (M % NB == 0) ? NB : (M % NB); i = M-MM; cublasDgemm('N', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) { b_copy(); magma_free( d_dinvA ); magma_free( d_x ); return; } cublasDgemm('N', 'N', i, N, MM, -1.0, A+i*lda, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for( i=M-MM-NB; i >= 0; i -= NB ) { cublasDgemm('N', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0.0, d_x+i, M); if (i-NB < 0) break; cublasDgemm('N', 'N', i, N, NB, -1.0, A+i*lda, lda, d_x+i, M, 1.0, b, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int MM = (M % NB == 0) ? 
NB : (M % NB); i = M-MM; cublasDgemm('T', 'N', MM, N, MM, alpha, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) { b_copy(); magma_free( d_dinvA ); magma_free( d_x ); return; } cublasDgemm('T', 'N', i, N, MM, -1.0, A+i, lda, d_x+i, M, alpha, b, ldb); /* the rest blocks */ for( i=M-MM-NB; i >= 0; i -= NB ) { cublasDgemm('T', 'N', NB, N, NB, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i-NB < 0) break; cublasDgemm('T', 'N', i, N, NB, -1.0, A+i, lda, d_x+i, M, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int MM = min (NB, M); cublasDgemm('T', 'N', MM, N, MM, alpha, d_dinvA, NB, b, ldb, 0, d_x, M); if (NB >= M) { b_copy(); magma_free( d_dinvA ); magma_free( d_x ); return; } cublasDgemm('T', 'N', M-NB, N, NB, -1.0, A+(NB)*lda, lda, d_x, M, alpha, b+NB, ldb); /* the rest blocks */ for( i=NB; i < M; i += NB ) { MM = min (M-i, NB); cublasDgemm('T', 'N', MM, N, MM, 1.0, d_dinvA+i*NB, NB, b+i, ldb, 0, d_x+i, M); if (i+NB >= M) break; cublasDgemm('T', 'N', M-i-NB, N, NB, -1.0, A+(i+NB)*lda+i, lda, d_x+i, M, 1.0, b+i+NB, ldb); } } } } else { // side=R /* invert the diagonals * Allocate device memory for the inverted diagonal blocks, size=N*BLOCK_SIZE */ magma_dmalloc( &d_dinvA, NB*((N/NB) + (N % NB != 0))*NB ); magma_dmalloc( &d_x, N*M ); cudaMemset(d_x, 0, N*M*sizeof(double)); cudaMemset(d_dinvA, 0, NB*((N/NB)+(N % NB != 0))*NB*sizeof(double)); diag_dtrtri (N, uplo, diag, A, d_dinvA, lda); if (tran == 'N' || tran == 'n') { /* the non-transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = (N % NB == 0) ? NB : (N % NB); i = N-NN; cublasDgemm('N', 'N', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); magma_free( d_x ); magma_free( d_dinvA ); return; } cublasDgemm('N', 'N', M, i, NN, -1.0, d_x+i*M, M, A+i, lda, alpha, b, ldb); /* the rest blocks */ for( i=N-NN-NB; i >= 0; i -= NB ) { cublasDgemm('N', 'N', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; cublasDgemm('N', 'N', M, i, NB, -1.0, d_x+i*M, M, A+i, lda, 1.0, b, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); cublasDgemm('N', 'N', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); magma_free( d_x ); magma_free( d_dinvA ); return; } cublasDgemm('N', 'N', M, N-NB, NB, -1.0, d_x, M, A+NB*lda, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for( i=NB; i < N; i += NB ) { NN = min(NB, N-i); cublasDgemm('N', 'N', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; cublasDgemm('N', 'N', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+(i+NB)*lda+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } } else { /* the transpose case */ if (uplo == 'L' || uplo == 'l') { /* the lower case */ /* handle the first block seperately with alpha */ int NN = min(NB, N); cublasDgemm('N', 'T', M, NN, NN, alpha, b, ldb, d_dinvA, NB, 0, d_x, M); if (NB >= N) { b_copy(); magma_free( d_x ); magma_free( d_dinvA ); return; } cublasDgemm('N', 'T', M, N-NB, NB, -1.0, d_x, M, A+NB, lda, alpha, b+NB*ldb, ldb); /* the rest blocks */ for( i=NB; i < N; i += NB ) { NN = min(NB, N-i); cublasDgemm('N', 'T', M, NN, NN, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0, d_x+i*M, M); if (i+NB >= N) break; cublasDgemm('N', 'T', M, N-i-NB, NB, -1.0, d_x+i*M, M, A+i*lda+NB+i, lda, 1.0, b+(i+NB)*ldb, ldb); } } else { /* the upper case */ /* handle the first block seperately with alpha */ 
int NN = (N % NB == 0) ? NB : (N % NB); i = N-NN; cublasDgemm('N', 'T', M, NN, NN, alpha, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) { b_copy(); magma_free( d_x ); magma_free( d_dinvA ); return; } cublasDgemm('N', 'T', M, i, NN, -1.0, d_x+i*M, M, A+i*lda, lda, alpha, b, ldb); /* the rest blocks */ for( i=N-NN-NB; i >= 0; i -= NB ) { cublasDgemm('N', 'T', M, NB, NB, 1.0, b+ldb*i, ldb, d_dinvA+i*NB, NB, 0.0, d_x+i*M, M); if (i-NB < 0) break; cublasDgemm('N', 'T', M, i, NB, -1.0, d_x+i*M, M, A+i*lda, lda, 1.0, b, ldb); } } } } b_copy(); magma_free( d_dinvA ); magma_free( d_x ); }
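For context, the exported entry point of the file above is magmablas_dtrsm; a minimal host-side driver could look like the sketch below. The header name, the matrix sizes, and the fill step are illustrative assumptions and are not part of the original sources.

// #include "magma.h"   // assumed MAGMA 1.x header declaring magma_dmalloc, magma_free, magmablas_dtrsm

void solve_lower_example( magma_int_t m, magma_int_t n )
{
    double *dA, *dB;
    magma_dmalloc( &dA, m*m );   // m-by-m lower-triangular A on the device
    magma_dmalloc( &dB, m*n );   // m-by-n right-hand sides B, overwritten by the solution X
    /* ... copy A and B from the host into dA and dB here (omitted) ... */

    // Solve A*X = 1.0*B with side='L', uplo='L', tran='N', diag='N';
    // X overwrites dB, exactly as the documentation block in the routine states.
    magmablas_dtrsm( 'L', 'L', 'N', 'N', m, n, 1.0, dA, m, dB, m );

    magma_free( dA );
    magma_free( dB );
}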
02d6ff733d6d4d34da197af3aac4eb50725e9db1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // To compile: nvcc CPUAndGPUVectorAddition1Block.cu -o temp2 // To run: ./temp2 #include <sys/time.h> #include <stdio.h> #define N 1030 #define SIZEOFBLOCKS 5 //This is the CUDA kernel that will add the two vectors. __global__ void Addition(float *A, float *B, float *C, int n) { int id = threadIdx.x; if(id < N) //This if keeps rogue threads from digging ditches in your nieghbors yard. { C[id] = A[id] + B[id]; } } int main() { int id; float sum, time; float *A_CPU, *B_CPU, *C_CPU; //Pointers for memory on the Host float *A_GPU, *B_GPU, *C_GPU; //Pointers for memory on the Device dim3 dimBlock; //This variable will hold the Dimensions of your block dim3 dimGrid; //This variable will hold the Dimensions of your grid timeval start, end; //Threads in a block dimBlock.x = N; dimBlock.y = 1; dimBlock.z = 1; //Blocks in a grid dimGrid.x = 1; dimGrid.y = 1; dimGrid.z = 1; //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); //Allocate Device (GPU) Memory hipMalloc(&A_GPU,N*sizeof(float)); hipMalloc(&B_GPU,N*sizeof(float)); hipMalloc(&C_GPU,N*sizeof(float)); //Loads values into vectors that we will add. for(id = 0; id < N; id++) { A_CPU[id] = (float)id*2; B_CPU[id] = (float)id; } printf("\nNumber of threads in block: %d\n\n", N); //********************** CPU addition start **************************************** //Starting a timer gettimeofday(&start, NULL); //Add the two vectors for(id = 0; id < N; id++) { C_CPU[id] = A_CPU[id] + B_CPU[id]; } //Stopping the timer gettimeofday(&end, NULL); //********************** CPU addition finish **************************************** //Calculating the total time used in the addition on the CPU and converting it to milliseconds and printing it to the screen. time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf("------ CPU Results ------\n"); printf("CPU Time in milliseconds= %.15f\n", (time/1000.0)); //Summing up the vector C and printing it so we can have a spot check for the correctness of the CPU addition. sum = 0.0; for(id = 0; id < N; id++) { sum += C_CPU[id]; } printf("Sum of C_CPU from CPU addition= %.15f\n", sum); printf("C_CPU[%d]= %f\n", N-1, C_CPU[N-1]); for(id = 0; id < N; id++) { C_CPU[id] = 0; } //********************** GPU addition start **************************************** //Starting a timer gettimeofday(&start, NULL); //Copying vectors A_CPU and B_CPU that were loaded on the CPU up to the GPU hipMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), hipMemcpyHostToDevice); hipMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Addition), dim3(dimGrid), dim3(dimBlock), 0, 0, A_GPU, B_GPU, C_GPU, N); //Copy C_GPU that was calculated on the GPU down to the CPU hipMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), hipMemcpyDeviceToHost); //Stopping the timer gettimeofday(&end, NULL); //********************** GPU addition finish **************************************** //Calculating the total time used in the addition on the GPU and converting it to milliseconds and printing it to the screen. time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf("\n------ GPU Results ------\n"); printf("GPU Time in milliseconds= %.15f\n", (time/1000.0)); //Summing up the vector C and printing it so we can have a spot check for the correctness of the GPU addition. 
sum = 0.0; for(id = 0; id < N; id++) { sum += C_CPU[id]; } printf("Sum of C_CPU from GPU addition= %.15f\n", sum); printf("C_CPU[%d]= %f\n", N-1, C_CPU[N-1]); //You're done, so clean up your mess. free(A_CPU); free(B_CPU); free(C_CPU); hipFree(A_GPU); hipFree(B_GPU); hipFree(C_GPU); return(0); }
02d6ff733d6d4d34da197af3aac4eb50725e9db1.cu
// To compile: nvcc CPUAndGPUVectorAddition1Block.cu -o temp2 // To run: ./temp2 #include <sys/time.h> #include <stdio.h> #define N 1030 #define SIZEOFBLOCKS 5 //This is the CUDA kernel that will add the two vectors. __global__ void Addition(float *A, float *B, float *C, int n) { int id = threadIdx.x; if(id < N) //This if keeps rogue threads from digging ditches in your nieghbors yard. { C[id] = A[id] + B[id]; } } int main() { int id; float sum, time; float *A_CPU, *B_CPU, *C_CPU; //Pointers for memory on the Host float *A_GPU, *B_GPU, *C_GPU; //Pointers for memory on the Device dim3 dimBlock; //This variable will hold the Dimensions of your block dim3 dimGrid; //This variable will hold the Dimensions of your grid timeval start, end; //Threads in a block dimBlock.x = N; dimBlock.y = 1; dimBlock.z = 1; //Blocks in a grid dimGrid.x = 1; dimGrid.y = 1; dimGrid.z = 1; //Allocate Host (CPU) Memory A_CPU = (float*)malloc(N*sizeof(float)); B_CPU = (float*)malloc(N*sizeof(float)); C_CPU = (float*)malloc(N*sizeof(float)); //Allocate Device (GPU) Memory cudaMalloc(&A_GPU,N*sizeof(float)); cudaMalloc(&B_GPU,N*sizeof(float)); cudaMalloc(&C_GPU,N*sizeof(float)); //Loads values into vectors that we will add. for(id = 0; id < N; id++) { A_CPU[id] = (float)id*2; B_CPU[id] = (float)id; } printf("\nNumber of threads in block: %d\n\n", N); //********************** CPU addition start **************************************** //Starting a timer gettimeofday(&start, NULL); //Add the two vectors for(id = 0; id < N; id++) { C_CPU[id] = A_CPU[id] + B_CPU[id]; } //Stopping the timer gettimeofday(&end, NULL); //********************** CPU addition finish **************************************** //Calculating the total time used in the addition on the CPU and converting it to milliseconds and printing it to the screen. time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf("------ CPU Results ------\n"); printf("CPU Time in milliseconds= %.15f\n", (time/1000.0)); //Summing up the vector C and printing it so we can have a spot check for the correctness of the CPU addition. sum = 0.0; for(id = 0; id < N; id++) { sum += C_CPU[id]; } printf("Sum of C_CPU from CPU addition= %.15f\n", sum); printf("C_CPU[%d]= %f\n", N-1, C_CPU[N-1]); for(id = 0; id < N; id++) { C_CPU[id] = 0; } //********************** GPU addition start **************************************** //Starting a timer gettimeofday(&start, NULL); //Copying vectors A_CPU and B_CPU that were loaded on the CPU up to the GPU cudaMemcpyAsync(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyAsync(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice); Addition<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, C_GPU, N); //Copy C_GPU that was calculated on the GPU down to the CPU cudaMemcpyAsync(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost); //Stopping the timer gettimeofday(&end, NULL); //********************** GPU addition finish **************************************** //Calculating the total time used in the addition on the GPU and converting it to milliseconds and printing it to the screen. time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec); printf("\n------ GPU Results ------\n"); printf("GPU Time in milliseconds= %.15f\n", (time/1000.0)); //Summing up the vector C and printing it so we can have a spot check for the correctness of the GPU addition. 
sum = 0.0; for(id = 0; id < N; id++) { sum += C_CPU[id]; } printf("Sum of C_CPU from GPU addition= %.15f\n", sum); printf("C_CPU[%d]= %f\n", N-1, C_CPU[N-1]); //You're done, so clean up your mess. free(A_CPU); free(B_CPU); free(C_CPU); cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU); return(0); }
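A caveat that applies to both files of this pair: with N defined as 1030, the launch sets dimBlock.x = 1030 threads in a single block, which exceeds the 1024-thread-per-block limit of CUDA and HIP devices, so the kernel launch fails with an invalid-configuration error and the addition never runs on the device; the program has no error checking to report this. A multi-block variant is sketched below, reusing the names of the program above; the block size of 256 is an arbitrary choice, and the kernel must then form a global index instead of using threadIdx.x alone.

__global__ void AdditionMB(float *A, float *B, float *C, int n)
{
    int id = blockIdx.x*blockDim.x + threadIdx.x;   // global index across all blocks
    if (id < n) C[id] = A[id] + B[id];              // guard handles the ragged last block
}

// host side: any dimBlock.x <= 1024 is legal
dimBlock.x = 256;
dimGrid.x  = (N + dimBlock.x - 1)/dimBlock.x;       // enough blocks to cover N = 1030
AdditionMB<<<dimGrid, dimBlock>>>(A_GPU, B_GPU, C_GPU, N);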
226a342b0130037069c112ecfd7c0dc766a91cca.hip
// !!! This is a file automatically generated by hipify!!! // Vector addition: C = 1/A + 1/B, for arbitrarily long vectors // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o vecAdd vecAdd.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o vecAdd vecAdd.cu // Includes #include <stdio.h> #include <stdlib.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_D; float* d_A; // device vectors float* d_B; float* d_C; // Functions void RandomInit(float*, long); // Device code __global__ void VecAdd(const float* A, const float* B, float* C, long N) { long i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) // only for N < blockDim.x*gridDim.x // C[i] = 1.0/A[i] + 1.0/B[i]; while (i < N) { C[i] = 1.0/A[i] + 1.0/B[i]; i += blockDim.x * gridDim.x; // go to the next grid } __syncthreads(); } // Host code int main(void) { int gid; // Error code to check return values for CUDA calls hipError_t err = hipSuccess; scanf("%d",&gid); err = hipSetDevice(gid); if (err != hipSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); hipSetDevice(gid); printf("Vector Addition: C = 1/A + 1/B\n"); int N; printf("Enter the size of the vectors: "); scanf("%ld",&N); printf("%ld\n",N); long size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(size); // Check memory allocations if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Set the sizes of threads and blocks int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d",&threadsPerBlock); printf("%d\n",threadsPerBlock); if( threadsPerBlock > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } // int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock; int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("%d\n",blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("The number of blocks must be less than 2147483647 ! 
\n"); exit(0); } printf("The number of blocks is %d\n", blocksPerGrid); // create the timer hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // start the timer hipEventRecord(start,0); // Allocate vectors in device memory hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_C, size); // Copy vectors from host memory to device memory hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Intime; hipEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer hipEventRecord(start,0); hipLaunchKernelGGL(( VecAdd) , dim3(blocksPerGrid), dim3(threadsPerBlock) , 0, 0, d_A, d_B, d_C, N); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float gputime; hipEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer hipEventRecord(start,0); hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); hipFree(d_A); hipFree(d_B); hipFree(d_C); // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float Outime; hipEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // start the timer hipEventRecord(start,0); h_D = (float*)malloc(size); // to compute the reference solution for (long i = 0; i < N; ++i) h_D[i] = 1.0/h_A[i] + 1.0/h_B[i]; // stop the timer hipEventRecord(stop,0); hipEventSynchronize(stop); float cputime; hipEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); // destroy the timer hipEventDestroy(start); hipEventDestroy(stop); // check result printf("Check result:\n"); double sum=0; double diff; for (long i = 0; i < N; ++i) { diff = abs(h_D[i] - h_C[i]); sum += diff*diff; if(diff > 1.0e-15) { // printf("i=%d, h_D=%15.10e, h_C=%15.10e \n", i, h_D[i], h_C[i]); } } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n\n",sum); hipDeviceReset(); } // Allocates an array with random float entries. void RandomInit(float* data, long n) { for (long i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
226a342b0130037069c112ecfd7c0dc766a91cca.cu
// Vector addition: C = 1/A + 1/B, for arbitrarily long vectors // compile with the following command: // // (for GTX970) // nvcc -arch=compute_52 -code=sm_52,sm_52 -O3 -m64 -o vecAdd vecAdd.cu // // (for GTX1060) // nvcc -arch=compute_61 -code=sm_61,sm_61 -O3 -m64 -o vecAdd vecAdd.cu // Includes #include <stdio.h> #include <stdlib.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <cuda_runtime.h> // Variables float* h_A; // host vectors float* h_B; float* h_C; float* h_D; float* d_A; // device vectors float* d_B; float* d_C; // Functions void RandomInit(float*, long); // Device code __global__ void VecAdd(const float* A, const float* B, float* C, long N) { long i = blockDim.x * blockIdx.x + threadIdx.x; // if (i < N) // only for N < blockDim.x*gridDim.x // C[i] = 1.0/A[i] + 1.0/B[i]; while (i < N) { C[i] = 1.0/A[i] + 1.0/B[i]; i += blockDim.x * gridDim.x; // go to the next grid } __syncthreads(); } // Host code int main(void) { int gid; // Error code to check return values for CUDA calls cudaError_t err = cudaSuccess; scanf("%d",&gid); err = cudaSetDevice(gid); if (err != cudaSuccess) { printf("!!! Cannot select GPU with device ID = %d\n", gid); exit(1); } printf("Set GPU with device ID = %d\n", gid); cudaSetDevice(gid); printf("Vector Addition: C = 1/A + 1/B\n"); int N; printf("Enter the size of the vectors: "); scanf("%ld",&N); printf("%ld\n",N); long size = N * sizeof(float); // Allocate input vectors h_A and h_B in host memory h_A = (float*)malloc(size); h_B = (float*)malloc(size); h_C = (float*)malloc(size); // Check memory allocations if (h_A == NULL || h_B == NULL || h_C == NULL) { fprintf(stderr, "Failed to allocate host vectors!\n"); exit(EXIT_FAILURE); } // Initialize input vectors RandomInit(h_A, N); RandomInit(h_B, N); // Set the sizes of threads and blocks int threadsPerBlock; printf("Enter the number of threads per block: "); scanf("%d",&threadsPerBlock); printf("%d\n",threadsPerBlock); if( threadsPerBlock > 1024 ) { printf("The number of threads per block must be less than 1024 ! \n"); exit(0); } // int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock; int blocksPerGrid; printf("Enter the number of blocks per grid: "); scanf("%d",&blocksPerGrid); printf("%d\n",blocksPerGrid); if( blocksPerGrid > 2147483647 ) { printf("The number of blocks must be less than 2147483647 ! 
\n"); exit(0); } printf("The number of blocks is %d\n", blocksPerGrid); // create the timer cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // start the timer cudaEventRecord(start,0); // Allocate vectors in device memory cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C, size); // Copy vectors from host memory to device memory cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Intime; cudaEventElapsedTime( &Intime, start, stop); printf("Input time for GPU: %f (ms) \n",Intime); // start the timer cudaEventRecord(start,0); VecAdd <<< blocksPerGrid, threadsPerBlock >>> (d_A, d_B, d_C, N); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float gputime; cudaEventElapsedTime( &gputime, start, stop); printf("Processing time for GPU: %f (ms) \n",gputime); printf("GPU Gflops: %f\n",3*N/(1000000.0*gputime)); // Copy result from device memory to host memory // h_C contains the result in host memory // start the timer cudaEventRecord(start,0); cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float Outime; cudaEventElapsedTime( &Outime, start, stop); printf("Output time for GPU: %f (ms) \n",Outime); float gputime_tot; gputime_tot = Intime + gputime + Outime; printf("Total time for GPU: %f (ms) \n",gputime_tot); // start the timer cudaEventRecord(start,0); h_D = (float*)malloc(size); // to compute the reference solution for (long i = 0; i < N; ++i) h_D[i] = 1.0/h_A[i] + 1.0/h_B[i]; // stop the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); float cputime; cudaEventElapsedTime( &cputime, start, stop); printf("Processing time for CPU: %f (ms) \n",cputime); printf("CPU Gflops: %f\n",3*N/(1000000.0*cputime)); printf("Speed up of GPU = %f\n", cputime/(gputime_tot)); // destroy the timer cudaEventDestroy(start); cudaEventDestroy(stop); // check result printf("Check result:\n"); double sum=0; double diff; for (long i = 0; i < N; ++i) { diff = abs(h_D[i] - h_C[i]); sum += diff*diff; if(diff > 1.0e-15) { // printf("i=%d, h_D=%15.10e, h_C=%15.10e \n", i, h_D[i], h_C[i]); } } sum = sqrt(sum); printf("norm(h_C - h_D)=%20.15e\n\n",sum); cudaDeviceReset(); } // Allocates an array with random float entries. void RandomInit(float* data, long n) { for (long i = 0; i < n; ++i) data[i] = rand() / (float)RAND_MAX; }
a9ca1e2692fb6710e71f8615d32f5a7c4a1dda41.hip
// !!! This is a file automatically generated by hipify!!!
#include "DT.cuh"

DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) {
    maxDTLength = argmaxDTLength;
    maxDocLength = argMaxDocLength;
    numChunks = argNumChunks;

    NZDTCount = new int[maxDocLength];
    DTIndex = new int[maxDTLength];
    DTValue = new int[maxDTLength];
    //DTCount = new int[maxDocLength];
    //DTOffset = new int[maxDocLength];
    DTLengthVec = new int[numChunks];
    docLengthVec = new int[numChunks];
}

void DTChunk::loadDocDTLength(string argFilePrefix) {
    ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length
    ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length

    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        DTLength >> DTLengthVec[chunkId];
        docLength >> docLengthVec[chunkId];
    }

    DTLength.close();
    docLength.close();
}

void DTChunk::CPUMemSet() {
    memset(NZDTCount, 0, maxDocLength * sizeof(int));
    memset(DTIndex, 0, maxDTLength * sizeof(int));
    memset(DTValue, 0, maxDTLength * sizeof(int));
    //memset(DTCount, 0, maxDocLength * sizeof(int));
    //memset(DTOffset, 0, maxDocLength * sizeof(int));
}

void DTChunk::GPUMemAllocate() {
    hipMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int));
    hipMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(int));
    hipMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int));
    hipMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int));
    hipMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int));

    DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0;
    printf("DT memory usage:%f GB\n", DTMemory);
}

void DTChunk::loadDTCountOffset(string argFilePrefix) {
    /*chunkId = argChunkId;*/
    for (int chunkId = 0; chunkId < numChunks; chunkId++) {
        string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);
        ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL

        int* DTCount = new int[docLengthVec[chunkId]];
        int* DTOffset = new int[docLengthVec[chunkId]];
        memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int));
        memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int));

        for (int i = 0; i < docLengthVec[chunkId]; i++) {
            DTCountOffset >> DTCount[i] >> DTOffset[i];
        }
        DTCountOffset.close();

        DTCountVec.push_back(DTCount);
        DTOffsetVec.push_back(DTOffset);
    }
}

void DTChunk::CPU2GPU(int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    hipMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
}

void DTChunk::GPUMemSet(int argChunkId) {
    chunkId = argChunkId;
    hipMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int));
    hipMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(int));
    hipMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int));
}

void DTChunk::CPU2GPUDTCountOffset(int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    hipMemcpy(deviceDTCount, DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(deviceDTOffset, DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), hipMemcpyHostToDevice);
}

void DTChunk::GPU2CPU(int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    hipMemcpy(NZDTCount, deviceNZDTCount, (docLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(DTIndex, deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost);
    hipMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), hipMemcpyDeviceToHost);
}

void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) {
    chunkId = argChunkId;
    //docLength = argDocLength;
    string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId);

    ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary);
    for (int i = 0; i < docLengthVec[chunkId]; i++) {
        OutputNZDTCount << NZDTCount[i] << "\n";
    }
    OutputNZDTCount.close();

    ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary);
    for (int i = 0; i < DTLengthVec[chunkId]; i++) {
        OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n";
    }
    OutputDTIndexValue.close();
}
a9ca1e2692fb6710e71f8615d32f5a7c4a1dda41.cu
#include "DT.cuh" DTChunk::DTChunk(int argmaxDTLength, int argMaxDocLength, int argNumChunks) { maxDTLength = argmaxDTLength; maxDocLength = argMaxDocLength; numChunks = argNumChunks; NZDTCount = new int[maxDocLength]; DTIndex = new int[maxDTLength]; DTValue = new int[maxDTLength]; //DTCount = new int[maxDocLength]; //DTOffset = new int[maxDocLength]; DTLengthVec = new int[numChunks]; docLengthVec = new int[numChunks]; } void DTChunk::loadDocDTLength(string argFilePrefix) { ifstream DTLength((argFilePrefix + string("/DTLength.txt")).c_str(), ios::binary);//store max Doc and DT length ifstream docLength((argFilePrefix + string("/docLength.txt")).c_str(), ios::binary);//store max Doc and DT length for (int chunkId = 0; chunkId < numChunks; chunkId++) { DTLength >> DTLengthVec[chunkId]; docLength >> docLengthVec[chunkId]; } DTLength.close(); docLength.close(); } void DTChunk::CPUMemSet() { memset(NZDTCount, 0, maxDocLength * sizeof(int)); memset(DTIndex, 0, maxDTLength * sizeof(int)); memset(DTValue, 0, maxDTLength * sizeof(int)); //memset(DTCount, 0, maxDocLength * sizeof(int)); //memset(DTOffset, 0, maxDocLength * sizeof(int)); } void DTChunk::GPUMemAllocate() { cudaMalloc((void**)&deviceNZDTCount, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceDTIndex, (maxDTLength) * sizeof(int)); cudaMalloc((void**)&deviceDTValue, (maxDTLength) * sizeof(int)); cudaMalloc((void**)&deviceDTCount, (maxDocLength) * sizeof(int)); cudaMalloc((void**)&deviceDTOffset, (maxDocLength) * sizeof(int)); DTMemory = (3 * maxDocLength + 2 * maxDTLength) * sizeof(int) / 1000000000.0; printf("DT memory usage:%f GB\n", DTMemory); } void DTChunk::loadDTCountOffset(string argFilePrefix) { /*chunkId = argChunkId;*/ for (int chunkId = 0; chunkId < numChunks; chunkId++) { string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ifstream DTCountOffset((chunkFolderName + string("/DTCountOffset.txt")).c_str(), ios::binary);//store Word offset of TL int* DTCount = new int[docLengthVec[chunkId]]; int* DTOffset = new int[docLengthVec[chunkId]]; memset(DTCount, 0, docLengthVec[chunkId] * sizeof(int)); memset(DTOffset, 0, docLengthVec[chunkId] * sizeof(int)); for (int i = 0; i < docLengthVec[chunkId]; i++) { DTCountOffset >> DTCount[i] >> DTOffset[i]; } DTCountOffset.close(); DTCountVec.push_back(DTCount); DTOffsetVec.push_back(DTOffset); } } void DTChunk::CPU2GPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(deviceNZDTCount, NZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTIndex, DTIndex, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTValue, DTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void DTChunk::GPUMemSet(int argChunkId) { chunkId = argChunkId; cudaMemset(deviceNZDTCount, 0, (maxDocLength) * sizeof(int)); cudaMemset(deviceDTIndex, 0, (maxDTLength) * sizeof(int)); cudaMemset(deviceDTValue, 0, (maxDTLength) * sizeof(int)); } void DTChunk::CPU2GPUDTCountOffset(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(deviceDTCount, DTCountVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(deviceDTOffset, DTOffsetVec[chunkId], (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyHostToDevice); } void DTChunk::GPU2CPU(int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; cudaMemcpy(NZDTCount, deviceNZDTCount, (docLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(DTIndex, 
deviceDTIndex, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(DTValue, deviceDTValue, (DTLengthVec[chunkId]) * sizeof(int), cudaMemcpyDeviceToHost); } void DTChunk::CPU2Disk(string argFilePrefix,int argChunkId) { chunkId = argChunkId; //docLength = argDocLength; string chunkFolderName = argFilePrefix + "/chunk" + to_string(chunkId); ofstream OutputNZDTCount((chunkFolderName + string("/NZDTCount.txt")).c_str(), ios::binary); for (int i = 0; i < docLengthVec[chunkId]; i++) { OutputNZDTCount << NZDTCount[i] << "\n"; } OutputNZDTCount.close(); ofstream OutputDTIndexValue((chunkFolderName + string("/DTIndexValue.txt")).c_str(), ios::binary); for (int i = 0; i < DTLengthVec[chunkId]; i++) { OutputDTIndexValue << DTIndex[i] <<" "<<DTValue[i]<< "\n"; } OutputDTIndexValue.close(); }
bcb9a09a4cd47009949505882d83a073966a0ada.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) {
        for (size_t i = 0; i < 1000; ++i) {
            p[idx] = l[idx] + r[idx];
        }
    }
}
bcb9a09a4cd47009949505882d83a073966a0ada.cu
extern "C" __global__ void vecAdd(int *l, int *r, int *p, size_t N) { size_t idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx < N) { for (size_t i = 0; i < 1000; ++i) { p[idx] = l[idx] + r[idx]; } } }
936cae53bcbc62fc0a0d7d1087ef2381d18e8f28.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "roi_align_impl.cuh" #include "util.cuh" #include "runtime/device/gpu/cuda_common.h" inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); } inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); } template <typename T> __device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high, int *y_high, T *w1, T *w2, T *w3, T *w4) { // return 0 if out of map boundary if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { *w1 = *w2 = *w3 = *w4 = 0; *x_low = *x_high = *y_low = *y_high = -1; return; } // low bounder is at least zero y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y; x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x; // top left point *y_low = roi_cast_int(y); *x_low = roi_cast_int(x); // bottom right point if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } // distance to nearest points T lx, ly, hx, hy; ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low); hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx; // weight is evaluated by the distance to point away. // the closer to point home, the more weight, the farther to point away. *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> __device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw, int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h, T *roi_start_w) { // (n, c, ph, pw) is the base param of pooled map *pw = thread_idx % pooled_width; *ph = (thread_idx / pooled_width) % pooled_height; *c = (thread_idx / pooled_width / pooled_height) % channels; *n = thread_idx / pooled_width / pooled_height / channels; // Roi has // 1. 4 points, or // 2. indicator + 4 points (1 + 4) const T *roi_box = roi_boxes + (*n) * roi_cols; int roi_batch_ind = 0; if (roi_cols == 5) { roi_batch_ind = roi_box[0]; roi_box++; } // Scale and shift ROI T roi_offset = roi_end_mode == 1 ? 
static_cast<T>(0.5) : static_cast<T>(.0); *roi_start_w = roi_box[0] * spatial_scale - roi_offset; *roi_start_h = roi_box[1] * spatial_scale - roi_offset; T roi_end_w = roi_box[2] * spatial_scale - roi_offset; T roi_end_h = roi_box[3] * spatial_scale - roi_offset; // New ROI height/width T roi_width = roi_end_w - (*roi_start_w); T roi_height = roi_end_h - (*roi_start_h); // ratio of roi / pooled *bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); *bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); *offset = (roi_batch_ind * channels + (*c)) * height * width; // grid (int) by Sample ratio if defined, otherwise by pooled H/W *roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height)); *roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width)); return; } template <typename T> __global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int offset, n, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; T accumulate_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) { T v1 = input[offset + y_low * width + x_low]; T v2 = input[offset + y_low * width + x_high]; T v3 = input[offset + y_high * width + x_low]; T v4 = input[offset + y_high * width + x_high]; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); accumulate_val += val; } } } accumulate_val /= count_points_in_grid_cell; out_data[thread_idx] = accumulate_val; } } template <typename T> void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; hipLaunchKernelGGL(( ROIAlignKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, x, roi_boxes, roi_cols, out_data, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void 
ROIAlign<float>(const float *x, const float *roi_boxes, int roi_rows, int roi_cols, float *out_data, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template <typename T> __global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int offset, n, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T *offset_top_diff = dy + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) { T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell; T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell; T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell; T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell; T *dx_1 = dx + offset + y_low * width + x_low; T *dx_2 = dx + offset + y_low * width + x_high; T *dx_3 = dx + offset + y_high * width + x_low; T *dx_4 = dx + offset + y_high * width + x_high; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { MsAtomicAdd(dx_1, g1); MsAtomicAdd(dx_2, g2); MsAtomicAdd(dx_3, g3); MsAtomicAdd(dx_4, g4); } } } } } } template <typename T> void ROIAlignGrad(const T *dy, const T *roi_boxes, int roi_rows, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; hipLaunchKernelGGL(( ROIAlignGradKernel), dim3(GET_BLOCKS(size)), dim3(GET_THREADS), 0, cuda_stream, size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, 
roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int roi_rows, int roi_cols, float *dx, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream); template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int roi_rows, int roi_cols, half *dx, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, hipStream_t cuda_stream);
936cae53bcbc62fc0a0d7d1087ef2381d18e8f28.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "roi_align_impl.cuh" #include "util.cuh" #include "runtime/device/gpu/cuda_common.h" inline __device__ int roi_cast_int(float x) { return __float2int_rd(x); } inline __device__ int roi_cast_int(half x) { return __half2int_rd(x); } template <typename T> __device__ void bilinear_interpolate(const int height, const int width, T y, T x, int *x_low, int *y_low, int *x_high, int *y_high, T *w1, T *w2, T *w3, T *w4) { // return 0 if out of map boundary if (y < static_cast<T>(-1.0) || y > static_cast<T>(height) || x < static_cast<T>(-1.0) || x > static_cast<T>(width)) { *w1 = *w2 = *w3 = *w4 = 0; *x_low = *x_high = *y_low = *y_high = -1; return; } // low bounder is at least zero y = y <= static_cast<T>(.0) ? static_cast<T>(.0) : y; x = x <= static_cast<T>(.0) ? static_cast<T>(.0) : x; // top left point *y_low = roi_cast_int(y); *x_low = roi_cast_int(x); // bottom right point if (*y_low >= height - 1) { *y_high = *y_low = height - 1; y = static_cast<T>(*y_low); } else { *y_high = *y_low + 1; } if (*x_low >= width - 1) { *x_high = *x_low = width - 1; x = static_cast<T>(*x_low); } else { *x_high = *x_low + 1; } // distance to nearest points T lx, ly, hx, hy; ly = y - static_cast<T>(*y_low), lx = x - static_cast<T>(*x_low); hy = static_cast<T>(1.) - ly, hx = static_cast<T>(1.) - lx; // weight is evaluated by the distance to point away. // the closer to point home, the more weight, the farther to point away. *w1 = hy * hx, *w2 = hy * lx, *w3 = ly * hx, *w4 = ly * lx; return; } template <typename T> __device__ void bin_box(int thread_idx, const T *roi_boxes, int roi_cols, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, int *offset, int *n, int *c, int *ph, int *pw, int *roi_bin_grid_h, int *roi_bin_grid_w, T *bin_size_h, T *bin_size_w, T *roi_start_h, T *roi_start_w) { // (n, c, ph, pw) is the base param of pooled map *pw = thread_idx % pooled_width; *ph = (thread_idx / pooled_width) % pooled_height; *c = (thread_idx / pooled_width / pooled_height) % channels; *n = thread_idx / pooled_width / pooled_height / channels; // Roi has // 1. 4 points, or // 2. indicator + 4 points (1 + 4) const T *roi_box = roi_boxes + (*n) * roi_cols; int roi_batch_ind = 0; if (roi_cols == 5) { roi_batch_ind = roi_box[0]; roi_box++; } // Scale and shift ROI T roi_offset = roi_end_mode == 1 ? 
static_cast<T>(0.5) : static_cast<T>(.0); *roi_start_w = roi_box[0] * spatial_scale - roi_offset; *roi_start_h = roi_box[1] * spatial_scale - roi_offset; T roi_end_w = roi_box[2] * spatial_scale - roi_offset; T roi_end_h = roi_box[3] * spatial_scale - roi_offset; // New ROI height/width T roi_width = roi_end_w - (*roi_start_w); T roi_height = roi_end_h - (*roi_start_h); // ratio of roi / pooled *bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); *bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); *offset = (roi_batch_ind * channels + (*c)) * height * width; // grid (int) by Sample ratio if defined, otherwise by pooled H/W *roi_bin_grid_h = (sample_num > 0) ? sample_num : roi_cast_int(roi_height / static_cast<T>(pooled_height)); *roi_bin_grid_w = (sample_num > 0) ? sample_num : roi_cast_int(roi_width / static_cast<T>(pooled_width)); return; } template <typename T> __global__ void ROIAlignKernel(size_t size, const T *input, const T *roi_boxes, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int offset, n, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; T accumulate_val = 0.; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) { T v1 = input[offset + y_low * width + x_low]; T v2 = input[offset + y_low * width + x_high]; T v3 = input[offset + y_high * width + x_low]; T v4 = input[offset + y_high * width + x_high]; T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); accumulate_val += val; } } } accumulate_val /= count_points_in_grid_cell; out_data[thread_idx] = accumulate_val; } } template <typename T> void ROIAlign(const T *x, const T *roi_boxes, int roi_rows, int roi_cols, T *out_data, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; ROIAlignKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>(size, x, roi_boxes, roi_cols, out_data, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width); return; } template void ROIAlign<float>(const float *x, const 
float *roi_boxes, int roi_rows, int roi_cols, float *out_data, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template void ROIAlign<half>(const half *x, const half *roi_boxes, int roi_rows, int roi_cols, half *out_data, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template <typename T> __global__ void ROIAlignGradKernel(size_t size, const T *dy, const T *roi_boxes, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width) { for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size; thread_idx += blockDim.x * gridDim.x) { int offset, n, c, ph, pw, roi_bin_grid_h, roi_bin_grid_w; T bin_size_h, bin_size_w, roi_start_h, roi_start_w; bin_box(thread_idx, roi_boxes, roi_cols, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, pooled_width, &offset, &n, &c, &ph, &pw, &roi_bin_grid_h, &roi_bin_grid_w, &bin_size_h, &bin_size_w, &roi_start_h, &roi_start_w); // (n, c, ph, pw) is the base param of pooled map const T count_points_in_grid_cell = roi_bin_grid_h * roi_bin_grid_w; int top_offset = (n * channels + c) * pooled_height * pooled_width; const T *offset_top_diff = dy + top_offset; const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw]; for (int iy = 0; iy < roi_bin_grid_h; iy++) { // Shift half point RIGHT for y / x, while previous scaled roi shift half point LEFT const T y = roi_start_h + static_cast<T>(ph) * bin_size_h + static_cast<T>(iy + .5f) * bin_size_h / static_cast<T>(roi_bin_grid_h); for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + static_cast<T>(pw) * bin_size_w + static_cast<T>(ix + .5f) * bin_size_w / static_cast<T>(roi_bin_grid_w); // bilinear interpolate by shifted y / x // calculate bilinear interpolation int x_low = 0, y_low = 0, x_high = 0, y_high = 0; T w1, w2, w3, w4; bilinear_interpolate(height, width, y, x, &x_low, &y_low, &x_high, &y_high, &w1, &w2, &w3, &w4); if (x_low != -1 || x_high != -1 || y_low != -1 || y_high != -1) { T g1 = top_diff_this_bin * w1 / count_points_in_grid_cell; T g2 = top_diff_this_bin * w2 / count_points_in_grid_cell; T g3 = top_diff_this_bin * w3 / count_points_in_grid_cell; T g4 = top_diff_this_bin * w4 / count_points_in_grid_cell; T *dx_1 = dx + offset + y_low * width + x_low; T *dx_2 = dx + offset + y_low * width + x_high; T *dx_3 = dx + offset + y_high * width + x_low; T *dx_4 = dx + offset + y_high * width + x_high; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { MsAtomicAdd(dx_1, g1); MsAtomicAdd(dx_2, g2); MsAtomicAdd(dx_3, g3); MsAtomicAdd(dx_4, g4); } } } } } } template <typename T> void ROIAlignGrad(const T *dy, const T *roi_boxes, int roi_rows, int roi_cols, T *dx, const T spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream) { size_t size = roi_rows * channels * pooled_height * pooled_width; ROIAlignGradKernel<<<GET_BLOCKS(size), GET_THREADS, 0, cuda_stream>>>( size, dy, roi_boxes, roi_cols, dx, spatial_scale, sample_num, roi_end_mode, channels, height, width, pooled_height, 
pooled_width); return; } template void ROIAlignGrad<float>(const float *dy, const float *roi_boxes, int roi_rows, int roi_cols, float *dx, const float spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream); template void ROIAlignGrad<half>(const half *dy, const half *roi_boxes, int roi_rows, int roi_cols, half *dx, const half spatial_scale, const int sample_num, int roi_end_mode, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, cudaStream_t cuda_stream);
838aff0b01c1e88cb314f758d59f3c77ef936f09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_left; int xdim0_update_halo_kernel5_plus_4_left_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_left; int ydim0_update_halo_kernel5_plus_4_left_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_left; int xdim1_update_halo_kernel5_plus_4_left_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_left; int ydim1_update_halo_kernel5_plus_4_left_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_left*(y)+xdim0_update_halo_kernel5_plus_4_left*ydim0_update_halo_kernel5_plus_4_left*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_left*(y)+xdim1_update_halo_kernel5_plus_4_left*ydim1_update_halo_kernel5_plus_4_left*(z)) //user function __device__ inline void update_halo_kernel5_plus_4_left(double *vol_flux_z, double *mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(4,0,0)]); if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_left( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_4_left + idx_z * 1 * xdim0_update_halo_kernel5_plus_4_left * ydim0_update_halo_kernel5_plus_4_left; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_4_left + idx_z * 1 * xdim1_update_halo_kernel5_plus_4_left * ydim1_update_halo_kernel5_plus_4_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_left(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(117,"update_halo_kernel5_plus_4_left"); OPS_kernels[117].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h || ydim0 != ydim0_update_halo_kernel5_plus_4_left_h || xdim1 != 
xdim1_update_halo_kernel5_plus_4_left_h || ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel5_plus_4_left, &xdim0, sizeof(int) ); xdim0_update_halo_kernel5_plus_4_left_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_left, &ydim0, sizeof(int) ); ydim0_update_halo_kernel5_plus_4_left_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_left, &xdim1, sizeof(int) ); xdim1_update_halo_kernel5_plus_4_left_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_left, &ydim1, sizeof(int) ); ydim1_update_halo_kernel5_plus_4_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[117].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[117].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[117].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[117].transfer += ops_compute_transfer(dim, range, &arg1); }
838aff0b01c1e88cb314f758d59f3c77ef936f09.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_left; int xdim0_update_halo_kernel5_plus_4_left_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_left; int ydim0_update_halo_kernel5_plus_4_left_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_left; int xdim1_update_halo_kernel5_plus_4_left_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_left; int ydim1_update_halo_kernel5_plus_4_left_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel5_plus_4_left*(y)+xdim0_update_halo_kernel5_plus_4_left*ydim0_update_halo_kernel5_plus_4_left*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel5_plus_4_left*(y)+xdim1_update_halo_kernel5_plus_4_left*ydim1_update_halo_kernel5_plus_4_left*(z)) //user function __device__ inline void update_halo_kernel5_plus_4_left(double *vol_flux_z, double *mass_flux_z, const int* fields) { if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0,0,0)] = (vol_flux_z[OPS_ACC0(4,0,0)]); if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0,0,0)] = (mass_flux_z[OPS_ACC1(4,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_left( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel5_plus_4_left + idx_z * 1 * xdim0_update_halo_kernel5_plus_4_left * ydim0_update_halo_kernel5_plus_4_left; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel5_plus_4_left + idx_z * 1 * xdim1_update_halo_kernel5_plus_4_left * ydim1_update_halo_kernel5_plus_4_left; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_left(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_left(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(117,"update_halo_kernel5_plus_4_left"); OPS_kernels[117].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel5_plus_4_left_h || ydim0 != ydim0_update_halo_kernel5_plus_4_left_h || xdim1 != xdim1_update_halo_kernel5_plus_4_left_h || ydim1 != ydim1_update_halo_kernel5_plus_4_left_h) { cudaMemcpyToSymbol( 
xdim0_update_halo_kernel5_plus_4_left, &xdim0, sizeof(int) ); xdim0_update_halo_kernel5_plus_4_left_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel5_plus_4_left, &ydim0, sizeof(int) ); ydim0_update_halo_kernel5_plus_4_left_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel5_plus_4_left, &xdim1, sizeof(int) ); xdim1_update_halo_kernel5_plus_4_left_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel5_plus_4_left, &ydim1, sizeof(int) ); ydim1_update_halo_kernel5_plus_4_left_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[117].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[117].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[117].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[117].transfer += ops_compute_transfer(dim, range, &arg1); }
a3711b98d9c6ad5508d1c822fa278161cb40f8d9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

/*
 Copyright 2014-2015 Dake Feng, Peri LLC, [email protected]

 This file is part of TomograPeri.

 TomograPeri is free software: you can redistribute it and/or modify
 it under the terms of the GNU Lesser General Public License as published by
 the Free Software Foundation, either version 3 of the License, or
 (at your option) any later version.

 TomograPeri is distributed in the hope that it will be useful,
 but WITHOUT ANY WARRANTY; without even the implied warranty of
 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 GNU General Public License for more details.

 You should have received a copy of the GNU Lesser General Public License
 along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/

#define blockx 16
#define blocky 16

__global__ void _weightBRightkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg3, float *dev_recon)
{
    int ind0, indg[3],q;
    int k=blockIdx.x*blockDim.x + threadIdx.x;
    if (k>=num_slices)
        return;
    ind0 = (num_grid-1) + (num_grid-1)*num_grid + k*num_grid*num_grid;
    indg[0] = ind0-1;
    indg[1] = ind0-num_grid;
    indg[2] = ind0-num_grid-1;
    for (q = 0; q < 3; q++)
    {
        dev_F[ind0] += 2*beta*dev_wg3[q];
        dev_G[ind0] -= 2*beta*dev_wg3[q]*(dev_recon[ind0]+dev_recon[indg[q]]);
    }
}
a3711b98d9c6ad5508d1c822fa278161cb40f8d9.cu
#include "includes.h" /* Copyright 2014-2015 Dake Feng, Peri LLC, [email protected] This file is part of TomograPeri. TomograPeri is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. TomograPeri is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with TomograPeri. If not, see <http://www.gnu.org/licenses/>. */ #define blockx 16 #define blocky 16 __global__ void _weightBRightkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg3, float *dev_recon) { int ind0, indg[3],q; int k=blockIdx.x*blockDim.x + threadIdx.x; if (k>=num_slices) return; ind0 = (num_grid-1) + (num_grid-1)*num_grid + k*num_grid*num_grid; indg[0] = ind0-1; indg[1] = ind0-num_grid; indg[2] = ind0-num_grid-1; for (q = 0; q < 3; q++) { dev_F[ind0] += 2*beta*dev_wg3[q]; dev_G[ind0] -= 2*beta*dev_wg3[q]*(dev_recon[ind0]+dev_recon[indg[q]]); } }
1d7f115f95ff3c53822b9eb969bd8d9f247a0484.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------

#include "gpu_nms.hpp"
#include <vector>
#include <iostream>

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of hipError_t error */ \
  do { \
    hipError_t error = condition; \
    if (error != hipSuccess) { \
      std::cout << hipGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;

__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(hipGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to hipSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(hipSetDevice(device_id));
}

void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);

  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);

  CUDA_CHECK(hipMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(hipMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), hipMemcpyHostToDevice));

  CUDA_CHECK(hipMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(hipMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, hipMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  CUDA_CHECK(hipFree(boxes_dev));
  CUDA_CHECK(hipFree(mask_dev));
}
1d7f115f95ff3c53822b9eb969bd8d9f247a0484.cu
// ------------------------------------------------------------------
// Faster R-CNN
// Copyright (c) 2015 Microsoft
// Licensed under The MIT License [see fast-rcnn/LICENSE for details]
// Written by Shaoqing Ren
// ------------------------------------------------------------------

#include "gpu_nms.hpp"
#include <vector>
#include <iostream>

#define CUDA_CHECK(condition) \
  /* Code block avoids redefinition of cudaError_t error */ \
  do { \
    cudaError_t error = condition; \
    if (error != cudaSuccess) { \
      std::cout << cudaGetErrorString(error) << std::endl; \
    } \
  } while (0)

#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
int const threadsPerBlock = sizeof(unsigned long long) * 8;

__device__ inline float devIoU(float const * const a, float const * const b) {
  float left = max(a[0], b[0]), right = min(a[2], b[2]);
  float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
  float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
  float interS = width * height;
  float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
  float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
  return interS / (Sa + Sb - interS);
}

__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
                           const float *dev_boxes, unsigned long long *dev_mask) {
  const int row_start = blockIdx.y;
  const int col_start = blockIdx.x;

  // if (row_start > col_start) return;

  const int row_size = min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
  const int col_size = min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);

  __shared__ float block_boxes[threadsPerBlock * 5];
  if (threadIdx.x < col_size) {
    block_boxes[threadIdx.x * 5 + 0] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
    block_boxes[threadIdx.x * 5 + 1] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
    block_boxes[threadIdx.x * 5 + 2] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
    block_boxes[threadIdx.x * 5 + 3] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
    block_boxes[threadIdx.x * 5 + 4] = dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
  }
  __syncthreads();

  if (threadIdx.x < row_size) {
    const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
    const float *cur_box = dev_boxes + cur_box_idx * 5;
    int i = 0;
    unsigned long long t = 0;
    int start = 0;
    if (row_start == col_start) {
      start = threadIdx.x + 1;
    }
    for (i = start; i < col_size; i++) {
      if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
        t |= 1ULL << i;
      }
    }
    const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
    dev_mask[cur_box_idx * col_blocks + col_start] = t;
  }
}

void _set_device(int device_id) {
  int current_device;
  CUDA_CHECK(cudaGetDevice(&current_device));
  if (current_device == device_id) {
    return;
  }
  // The call to cudaSetDevice must come before any calls to Get, which
  // may perform initialization using the GPU.
  CUDA_CHECK(cudaSetDevice(device_id));
}

void _nms(int* keep_out, int* num_out, const float* boxes_host, int boxes_num,
          int boxes_dim, float nms_overlap_thresh, int device_id) {
  _set_device(device_id);

  float* boxes_dev = NULL;
  unsigned long long* mask_dev = NULL;

  const int col_blocks = DIVUP(boxes_num, threadsPerBlock);

  CUDA_CHECK(cudaMalloc(&boxes_dev, boxes_num * boxes_dim * sizeof(float)));
  CUDA_CHECK(cudaMemcpy(boxes_dev, boxes_host, boxes_num * boxes_dim * sizeof(float), cudaMemcpyHostToDevice));

  CUDA_CHECK(cudaMalloc(&mask_dev, boxes_num * col_blocks * sizeof(unsigned long long)));

  dim3 blocks(DIVUP(boxes_num, threadsPerBlock), DIVUP(boxes_num, threadsPerBlock));
  dim3 threads(threadsPerBlock);
  nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes_dev, mask_dev);

  std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
  CUDA_CHECK(cudaMemcpy(&mask_host[0], mask_dev, sizeof(unsigned long long) * boxes_num * col_blocks, cudaMemcpyDeviceToHost));

  std::vector<unsigned long long> remv(col_blocks);
  memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);

  int num_to_keep = 0;
  for (int i = 0; i < boxes_num; i++) {
    int nblock = i / threadsPerBlock;
    int inblock = i % threadsPerBlock;

    if (!(remv[nblock] & (1ULL << inblock))) {
      keep_out[num_to_keep++] = i;
      unsigned long long *p = &mask_host[0] + i * col_blocks;
      for (int j = nblock; j < col_blocks; j++) {
        remv[j] |= p[j];
      }
    }
  }
  *num_out = num_to_keep;

  CUDA_CHECK(cudaFree(boxes_dev));
  CUDA_CHECK(cudaFree(mask_dev));
}
8607856253c08276695a6ae2e123520086a967cc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pairwise_hist.cuh"
#include "split_properties_helpers.cuh"
#include "pairwise_hist_one_byte_5bit.cuh"
#include "pairwise_hist_one_byte_6bit_hip.cuh"
#include "pairwise_hist_one_byte_7bit.cuh"
#include "pairwise_hist_one_byte_8bit_atomics.cuh"

#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>

namespace NKernel {

    template <bool FULL_PASS>
    __global__ void BuildBinaryFeatureHistograms(const TCFeature* nbFeatures, int featureCount,
                                                 const TDataPartition* partition,
                                                 const TPartitionStatistics* partitionStats,
                                                 const ui64 histLineSize,
                                                 float* histogram) {

        if (FULL_PASS) {
            partitionStats += blockIdx.y;
            histogram += blockIdx.y * histLineSize * 4;
        } else {
            const int depth = (int)log2((float)gridDim.y);
            int partId = GetPairwisePartIdToCalculate(partition);
            partitionStats += partId;
            histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4;
        }

        const int featuresPerBlock = blockDim.x / 32;
        const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / 32;
        nbFeatures += featureId;
        const float partWeight = partitionStats->Weight;

        if (featureId >= featureCount || partitionStats->Weight == 0) {
            return;
        }

        const int x = threadIdx.x & 31;
        const ui32 featureFolds = nbFeatures->Folds;
        const ui32 featureOffset = nbFeatures->FirstFoldIndex;

        if (nbFeatures->OneHotFeature) {
            for (ui32 fold = x; fold < featureFolds; fold += 32) {
                const ui32 offset = featureOffset + fold;
                const float hist0 = histogram[4 * offset];
                // const float hist1 = histogram[4 * offset + 1];
                const float hist2 = histogram[4 * offset + 2];
                const float hist3 = histogram[4 * offset + 3];

                const float w00 = max(hist0, 0.0f);
                const float w01 = max(hist2, 0.0f);
                const float w10 = max(hist3, 0.0f);
                const float w11 = max(partWeight - hist0 - hist2 - hist3, 0.0f);

                histogram[4 * offset] = w00;
                histogram[4 * offset + 1] = w01;
                histogram[4 * offset + 2] = w10;
                histogram[4 * offset + 3] = w11;
            }
        } else {
            for (ui32 fold = x; fold < featureFolds; fold += 32) {
                const ui32 offset = featureOffset + fold;
                const float hist0 = histogram[4 * offset];
                const float hist1 = histogram[4 * offset + 1];
                const float hist2 = histogram[4 * offset + 2];
                const float hist3 = histogram[4 * offset + 3];

                const float w00 = max(hist1 + hist2, 0.0f);
                const float w01 = max(hist0 - hist1, 0.0f);
                const float w10 = max(hist3 - hist2, 0.0f);
                const float w11 = max(partWeight - hist0 - hist3, 0.0f);

                histogram[4 * offset] = w00;
                histogram[4 * offset + 1] = w01;
                histogram[4 * offset + 2] = w10;
                histogram[4 * offset + 3] = w11;
            }
        }
    }

    void BuildBinaryFeatureHistograms(const TCFeature* features, ui32 featureCount,
                                      const TDataPartition* partition,
                                      const TPartitionStatistics* partitionStats,
                                      ui32 partCount,
                                      const ui64 histLineSize,
                                      bool fullPass,
                                      float* histogram,
                                      TCudaStream stream) {

        const int buildHistogramBlockSize = 256;

        dim3 numBlocks;
        numBlocks.x = (featureCount * 32 + buildHistogramBlockSize - 1) / buildHistogramBlockSize;
        numBlocks.y = fullPass ? partCount : partCount / 4;
        numBlocks.z = fullPass ? 1 : 3;

        if (fullPass) {
            BuildBinaryFeatureHistograms<true><< <numBlocks, buildHistogramBlockSize, 0, stream >> > (features, featureCount, partition, partitionStats, histLineSize, histogram);
        } else {
            BuildBinaryFeatureHistograms<false><< <numBlocks, buildHistogramBlockSize, 0, stream >> > (features, featureCount, partition, partitionStats, histLineSize, histogram);
        }
    }

    __global__ void UpdatePairwiseHistogramsImpl(ui32 firstFeatureId, ui32 featureCount,
                                                 const TDataPartition* parts,
                                                 const ui64 histLineSize,
                                                 float* histogram) {
        const int histCount = 4;
        const int depth = (int)log2((float)gridDim.y);
        int partIds[4];
        {
            int partSizes[4];
            #pragma unroll
            for (int i = 0; i < 4; ++i) {
                const int partId = (i << depth) | blockIdx.y;
                partIds[i] = partId;
                partSizes[i] = parts[partId].Size;
            }//

            #pragma unroll
            for (int i = 0; i < 4; ++i) {
                #pragma unroll
                for (int j = i + 1; j < 4; ++j) {
                    if (partSizes[j] > partSizes[i]) {
                        const int tmpSize = partSizes[j];
                        const int tmpId = partIds[j];
                        partSizes[j] = partSizes[i];
                        partIds[j] = partIds[i];
                        partSizes[i] = tmpSize;
                        partIds[i] = tmpId;
                    }
                }
            }
        }

        const ui32 binFeature = firstFeatureId + blockIdx.x * blockDim.x + threadIdx.x;
        if (binFeature < (firstFeatureId + featureCount)) {

            float hists[histCount * 4];
            #pragma unroll
            for (int part = 0; part < 4; ++part) {
                const size_t srcPartIdx = (part << depth) | blockIdx.y;
                #pragma unroll
                for (int i = 0; i < histCount; ++i) {
                    hists[part * 4 + i] = histogram[histCount * (srcPartIdx * histLineSize + binFeature) + i];
                }
            }

            #pragma unroll
            for (int part = 1; part < 4; ++part) {
                #pragma unroll
                for (int i = 0; i < histCount; ++i) {
                    hists[i] -= hists[4 * part + i];
                }
            }

            #pragma unroll
            for (int part = 0; part < 4; ++part) {
                const size_t destPartIdx = partIds[part];
                #pragma unroll
                for (int i = 0; i < histCount; ++i) {
                    histogram[histCount * (destPartIdx * histLineSize + binFeature) + i] = max(hists[part * 4 + i], 0.0f);
                }
            }
        }
    }

    void UpdatePairwiseHistograms(const ui32 firstFeatureId, const ui32 featureCount,
                                  const TDataPartition* dataParts, ui32 partCount,
                                  ui32 histLineSize,
                                  float* histograms,
                                  TCudaStream stream
    ) {
        const int blockSize = 256;
        dim3 numBlocks;
        numBlocks.x = (featureCount + blockSize - 1) / blockSize;
        numBlocks.y = partCount / 4;
        numBlocks.z = 1;
        UpdatePairwiseHistogramsImpl<< <numBlocks, blockSize, 0, stream>>>(firstFeatureId, featureCount, dataParts, histLineSize, histograms);
    }

    void ScanPairwiseHistograms(const TCFeature* features,
                                int featureCount, int partCount,
                                int histLineSize, bool fullPass,
                                float* binSums,
                                TCudaStream stream) {
        const size_t histOffset = fullPass ? 0 : (partCount / 4) * ((ui64) histLineSize * 4);

        const int scanBlockSize = 256;
        dim3 scanBlocks;
        scanBlocks.x = (featureCount * 32 + scanBlockSize - 1) / scanBlockSize;
        scanBlocks.y = fullPass ? partCount : partCount * 3 / 4;
        scanBlocks.z = 1;

        ScanHistogramsImpl<scanBlockSize, 4> << < scanBlocks, scanBlockSize, 0, stream >> > (features, featureCount, histLineSize, binSums + histOffset);
    }

    void ComputePairwiseHistogramOneByte5Bits(const TCFeature* features, const TCFeature* featureCpu,
                                              const ui32 featureCount, const ui32 fiveBitsFeatureCount,
                                              const ui32* compressedIndex, const uint2* pairs, ui32 pairCount,
                                              const float* weight, const TDataPartition* partition, ui32 partCount,
                                              ui32 histLineSize, bool fullPass, float* histogram,
                                              int parallelStreamCount, TCudaStream stream) {

        ComputePairwiseHistogramOneByte5BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                        pairs, pairCount, weight, partition, partCount, histLineSize,
                                                        fullPass, histogram, parallelStreamCount, stream);

        ComputePairwiseHistogramOneByte5BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                       pairs, pairCount, weight, partition, partCount, histLineSize,
                                                       fullPass, histogram, parallelStreamCount, stream);
    }

    void ComputePairwiseHistogramOneByte6Bits(const TCFeature* features, const TCFeature* featureCpu,
                                              const ui32 featureCount, const ui32 fiveBitsFeatureCount,
                                              const ui32* compressedIndex, const uint2* pairs, ui32 pairCount,
                                              const float* weight, const TDataPartition* partition, ui32 partCount,
                                              ui32 histLineSize, bool fullPass, float* histogram,
                                              int parallelStreamCount, TCudaStream stream) {

        ComputePairwiseHistogramOneByte6BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                        pairs, pairCount, weight, partition, partCount, histLineSize,
                                                        fullPass, histogram, parallelStreamCount, stream);

        ComputePairwiseHistogramOneByte6BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                       pairs, pairCount, weight, partition, partCount, histLineSize,
                                                       fullPass, histogram, parallelStreamCount, stream);
    }

    void ComputePairwiseHistogramOneByte7Bits(const TCFeature* features, const TCFeature* featureCpu,
                                              const ui32 featureCount, const ui32 fiveBitsFeatureCount,
                                              const ui32* compressedIndex, const uint2* pairs, ui32 pairCount,
                                              const float* weight, const TDataPartition* partition, ui32 partCount,
                                              ui32 histLineSize, bool fullPass, float* histogram,
                                              int parallelStreamCount, TCudaStream stream) {

        ComputePairwiseHistogramOneByte7BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                        pairs, pairCount, weight, partition, partCount, histLineSize,
                                                        fullPass, histogram, parallelStreamCount, stream);

        ComputePairwiseHistogramOneByte7BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                       pairs, pairCount, weight, partition, partCount, histLineSize,
                                                       fullPass, histogram, parallelStreamCount, stream);
    }

    void ComputePairwiseHistogramOneByte8BitAtomics(const TCFeature* features, const TCFeature* featureCpu,
                                                    const ui32 featureCount, const ui32 fiveBitsFeatureCount,
                                                    const ui32* compressedIndex, const uint2* pairs, ui32 pairCount,
                                                    const float* weight, const TDataPartition* partition, ui32 partCount,
                                                    ui32 histLineSize, bool fullPass, float* histogram,
                                                    int parallelStreamCount, TCudaStream stream) {

        ComputePairwiseHistogramOneByte8BitAtomicsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                              pairs, pairCount, weight, partition, partCount, histLineSize,
                                                              fullPass, histogram, parallelStreamCount, stream);

        ComputePairwiseHistogramOneByte8BitAtomicsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex,
                                                             pairs, pairCount, weight, partition, partCount, histLineSize,
                                                             fullPass, histogram, parallelStreamCount, stream);
    }
}
8607856253c08276695a6ae2e123520086a967cc.cu
#include "pairwise_hist.cuh" #include "split_properties_helpers.cuh" #include "pairwise_hist_one_byte_5bit.cuh" #include "pairwise_hist_one_byte_6bit.cuh" #include "pairwise_hist_one_byte_7bit.cuh" #include "pairwise_hist_one_byte_8bit_atomics.cuh" #include <catboost/cuda/cuda_lib/kernel/arch.cuh> #include <catboost/cuda/cuda_util/kernel/instructions.cuh> #include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh> namespace NKernel { template <bool FULL_PASS> __global__ void BuildBinaryFeatureHistograms(const TCFeature* nbFeatures, int featureCount, const TDataPartition* partition, const TPartitionStatistics* partitionStats, const ui64 histLineSize, float* histogram) { if (FULL_PASS) { partitionStats += blockIdx.y; histogram += blockIdx.y * histLineSize * 4; } else { const int depth = (int)log2((float)gridDim.y); int partId = GetPairwisePartIdToCalculate(partition); partitionStats += partId; histogram += (((blockIdx.z + 1) << depth) | blockIdx.y) * histLineSize * 4; } const int featuresPerBlock = blockDim.x / 32; const int featureId = blockIdx.x * featuresPerBlock + threadIdx.x / 32; nbFeatures += featureId; const float partWeight = partitionStats->Weight; if (featureId >= featureCount || partitionStats->Weight == 0) { return; } const int x = threadIdx.x & 31; const ui32 featureFolds = nbFeatures->Folds; const ui32 featureOffset = nbFeatures->FirstFoldIndex; if (nbFeatures->OneHotFeature) { for (ui32 fold = x; fold < featureFolds; fold += 32) { const ui32 offset = featureOffset + fold; const float hist0 = histogram[4 * offset]; // const float hist1 = histogram[4 * offset + 1]; const float hist2 = histogram[4 * offset + 2]; const float hist3 = histogram[4 * offset + 3]; const float w00 = max(hist0, 0.0f); const float w01 = max(hist2, 0.0f); const float w10 = max(hist3, 0.0f); const float w11 = max(partWeight - hist0 - hist2 - hist3, 0.0f); histogram[4 * offset] = w00; histogram[4 * offset + 1] = w01; histogram[4 * offset + 2] = w10; histogram[4 * offset + 3] = w11; } } else { for (ui32 fold = x; fold < featureFolds; fold += 32) { const ui32 offset = featureOffset + fold; const float hist0 = histogram[4 * offset]; const float hist1 = histogram[4 * offset + 1]; const float hist2 = histogram[4 * offset + 2]; const float hist3 = histogram[4 * offset + 3]; const float w00 = max(hist1 + hist2, 0.0f); const float w01 = max(hist0 - hist1, 0.0f); const float w10 = max(hist3 - hist2, 0.0f); const float w11 = max(partWeight - hist0 - hist3, 0.0f); histogram[4 * offset] = w00; histogram[4 * offset + 1] = w01; histogram[4 * offset + 2] = w10; histogram[4 * offset + 3] = w11; } } } void BuildBinaryFeatureHistograms(const TCFeature* features, ui32 featureCount, const TDataPartition* partition, const TPartitionStatistics* partitionStats, ui32 partCount, const ui64 histLineSize, bool fullPass, float* histogram, TCudaStream stream) { const int buildHistogramBlockSize = 256; dim3 numBlocks; numBlocks.x = (featureCount * 32 + buildHistogramBlockSize - 1) / buildHistogramBlockSize; numBlocks.y = fullPass ? partCount : partCount / 4; numBlocks.z = fullPass ? 
1 : 3; if (fullPass) { BuildBinaryFeatureHistograms<true><< <numBlocks, buildHistogramBlockSize, 0, stream >> > (features, featureCount, partition, partitionStats, histLineSize, histogram); } else { BuildBinaryFeatureHistograms<false><< <numBlocks, buildHistogramBlockSize, 0, stream >> > (features, featureCount, partition, partitionStats, histLineSize, histogram); } } __global__ void UpdatePairwiseHistogramsImpl(ui32 firstFeatureId, ui32 featureCount, const TDataPartition* parts, const ui64 histLineSize, float* histogram) { const int histCount = 4; const int depth = (int)log2((float)gridDim.y); int partIds[4]; { int partSizes[4]; #pragma unroll for (int i = 0; i < 4; ++i) { const int partId = (i << depth) | blockIdx.y; partIds[i] = partId; partSizes[i] = parts[partId].Size; }// #pragma unroll for (int i = 0; i < 4; ++i) { #pragma unroll for (int j = i + 1; j < 4; ++j) { if (partSizes[j] > partSizes[i]) { const int tmpSize = partSizes[j]; const int tmpId = partIds[j]; partSizes[j] = partSizes[i]; partIds[j] = partIds[i]; partSizes[i] = tmpSize; partIds[i] = tmpId; } } } } const ui32 binFeature = firstFeatureId + blockIdx.x * blockDim.x + threadIdx.x; if (binFeature < (firstFeatureId + featureCount)) { float hists[histCount * 4]; #pragma unroll for (int part = 0; part < 4; ++part) { const size_t srcPartIdx = (part << depth) | blockIdx.y; #pragma unroll for (int i = 0; i < histCount; ++i) { hists[part * 4 + i] = histogram[histCount * (srcPartIdx * histLineSize + binFeature) + i]; } } #pragma unroll for (int part = 1; part < 4; ++part) { #pragma unroll for (int i = 0; i < histCount; ++i) { hists[i] -= hists[4 * part + i]; } } #pragma unroll for (int part = 0; part < 4; ++part) { const size_t destPartIdx = partIds[part]; #pragma unroll for (int i = 0; i < histCount; ++i) { histogram[histCount * (destPartIdx * histLineSize + binFeature) + i] = max(hists[part * 4 + i], 0.0f); } } } } void UpdatePairwiseHistograms(const ui32 firstFeatureId, const ui32 featureCount, const TDataPartition* dataParts, ui32 partCount, ui32 histLineSize, float* histograms, TCudaStream stream ) { const int blockSize = 256; dim3 numBlocks; numBlocks.x = (featureCount + blockSize - 1) / blockSize; numBlocks.y = partCount / 4; numBlocks.z = 1; UpdatePairwiseHistogramsImpl<< <numBlocks, blockSize, 0, stream>>>(firstFeatureId, featureCount, dataParts, histLineSize, histograms); } void ScanPairwiseHistograms(const TCFeature* features, int featureCount, int partCount, int histLineSize, bool fullPass, float* binSums, TCudaStream stream) { const size_t histOffset = fullPass ? 0 : (partCount / 4) * ((ui64) histLineSize * 4); const int scanBlockSize = 256; dim3 scanBlocks; scanBlocks.x = (featureCount * 32 + scanBlockSize - 1) / scanBlockSize; scanBlocks.y = fullPass ? 
partCount : partCount * 3 / 4; scanBlocks.z = 1; ScanHistogramsImpl<scanBlockSize, 4> << < scanBlocks, scanBlockSize, 0, stream >> > (features, featureCount, histLineSize, binSums + histOffset); } void ComputePairwiseHistogramOneByte5Bits(const TCFeature* features, const TCFeature* featureCpu, const ui32 featureCount, const ui32 fiveBitsFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, int parallelStreamCount, TCudaStream stream) { ComputePairwiseHistogramOneByte5BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); ComputePairwiseHistogramOneByte5BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); } void ComputePairwiseHistogramOneByte6Bits(const TCFeature* features, const TCFeature* featureCpu, const ui32 featureCount, const ui32 fiveBitsFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, int parallelStreamCount, TCudaStream stream) { ComputePairwiseHistogramOneByte6BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); ComputePairwiseHistogramOneByte6BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); } void ComputePairwiseHistogramOneByte7Bits(const TCFeature* features, const TCFeature* featureCpu, const ui32 featureCount, const ui32 fiveBitsFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, int parallelStreamCount, TCudaStream stream) { ComputePairwiseHistogramOneByte7BitsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); ComputePairwiseHistogramOneByte7BitsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); } void ComputePairwiseHistogramOneByte8BitAtomics(const TCFeature* features, const TCFeature* featureCpu, const ui32 featureCount, const ui32 fiveBitsFeatureCount, const ui32* compressedIndex, const uint2* pairs, ui32 pairCount, const float* weight, const TDataPartition* partition, ui32 partCount, ui32 histLineSize, bool fullPass, float* histogram, int parallelStreamCount, TCudaStream stream) { ComputePairwiseHistogramOneByte8BitAtomicsImpl<false>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); ComputePairwiseHistogramOneByte8BitAtomicsImpl<true>(features, featureCpu, featureCount, fiveBitsFeatureCount, compressedIndex, 
pairs, pairCount, weight, partition, partCount, histLineSize, fullPass, histogram, parallelStreamCount, stream); } }
047e46b464dbfffa3246b85dbc3936f8708a1b05.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <backends/fvm_types.hpp> #include "detail.hpp" namespace arb { namespace gpu { namespace kernels { /// GPU implementatin of Hines matrix assembly /// Flat layout /// For a given time step size dt /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve template <typename T, typename I> __global__ void assemble_matrix_flat( T* d, T* rhs, const T* invariant_d, const T* voltage, const T* current, const T* cv_capacitance, const T* area, const I* cv_to_cell, const T* dt_cell, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid<n) { auto cid = cv_to_cell[tid]; auto dt = dt_cell[cid]; // Note: dt==0 case is expected only at the end of a mindelay/2 // integration period, and consequently divergence is unlikely // to be a peformance problem. if (dt>0) { // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units S (micro-Siemens). // See the model documentation in docs/model for more information. T factor = 1e-3/dt; auto gi = factor * cv_capacitance[tid]; d[tid] = gi + invariant_d[tid]; rhs[tid] = gi*voltage[tid] - T(1e-3)*area[tid]*current[tid]; } else { d[tid] = 0; rhs[tid] = voltage[tid]; } } } /// GPU implementatin of Hines matrix assembly /// Interleaved layout /// For a given time step size dt /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve template <typename T, typename I, unsigned BlockWidth, unsigned LoadWidth, unsigned Threads> __global__ void assemble_matrix_interleaved( T* d, T* rhs, const T* invariant_d, const T* voltage, const T* current, const T* cv_capacitance, const T* area, const I* sizes, const I* starts, const I* matrix_to_cell, const T* dt_cell, unsigned padded_size, unsigned num_mtx) { static_assert(BlockWidth*LoadWidth==Threads, "number of threads must equal number of values to process per block"); __shared__ T buffer_v[Threads]; __shared__ T buffer_i[Threads]; const unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; const unsigned lid = threadIdx.x; const unsigned mtx_id = tid/LoadWidth; const unsigned mtx_lane = tid - mtx_id*LoadWidth; const unsigned blk_id = tid/(BlockWidth*LoadWidth); const unsigned blk_row = lid/BlockWidth; const unsigned blk_lane = lid - blk_row*BlockWidth; const unsigned blk_pos = LoadWidth*blk_lane + blk_row; const bool do_load = mtx_id<num_mtx; unsigned load_pos = do_load? starts[mtx_id] + mtx_lane : 0; const unsigned end = do_load? starts[mtx_id] + sizes[mtx_id]: 0; unsigned store_pos = blk_id*BlockWidth*padded_size + (blk_row*BlockWidth + blk_lane); const unsigned max_size = sizes[0]; T factor = 0; T dt = 0; const unsigned permuted_cid = blk_id*BlockWidth + blk_lane; if (permuted_cid<num_mtx) { auto cid = matrix_to_cell[permuted_cid]; dt = dt_cell[cid]; // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units S (micro-Siemens). // See the model documentation in docs/model for more information. factor = dt>0? 
1e-3/dt: 0; } for (unsigned j=0u; j<max_size; j+=LoadWidth) { if (do_load && load_pos<end) { buffer_v[lid] = voltage[load_pos]; buffer_i[lid] = current[load_pos]; } __syncthreads(); if (j+blk_row<padded_size) { const auto gi = factor * cv_capacitance[store_pos]; if (dt>0) { d[store_pos] = (gi + invariant_d[store_pos]); rhs[store_pos] = (gi*buffer_v[blk_pos] - T(1e-3)*area[store_pos]*buffer_i[blk_pos]); } else { d[store_pos] = 0; rhs[store_pos] = buffer_v[blk_pos]; } } __syncthreads(); store_pos += LoadWidth*BlockWidth; load_pos += LoadWidth; } } } // namespace kernels void assemble_matrix_flat( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_size_type* cv_to_cell, const fvm_value_type* dt_cell, unsigned n) { constexpr unsigned block_dim = 128; const unsigned grid_dim = impl::block_count(n, block_dim); hipLaunchKernelGGL(( kernels::assemble_matrix_flat <fvm_value_type, fvm_size_type>) , dim3(grid_dim), dim3(block_dim), 0, 0, d, rhs, invariant_d, voltage, current, cv_capacitance, area, cv_to_cell, dt_cell, n); } //template <typename T, typename I, unsigned BlockWidth, unsigned LoadWidth, unsigned Threads> void assemble_matrix_interleaved( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_size_type* sizes, const fvm_size_type* starts, const fvm_size_type* matrix_to_cell, const fvm_value_type* dt_cell, unsigned padded_size, unsigned num_mtx) { constexpr unsigned bd = impl::block_dim(); constexpr unsigned lw = impl::load_width(); constexpr unsigned block_dim = bd*lw; // The number of threads is threads_per_matrix*num_mtx const unsigned grid_dim = impl::block_count(num_mtx*lw, block_dim); hipLaunchKernelGGL(( kernels::assemble_matrix_interleaved <fvm_value_type, fvm_size_type, bd, lw, block_dim>) , dim3(grid_dim), dim3(block_dim), 0, 0, d, rhs, invariant_d, voltage, current, cv_capacitance, area, sizes, starts, matrix_to_cell, dt_cell, padded_size, num_mtx); } } // namespace gpu } // namespace arb
047e46b464dbfffa3246b85dbc3936f8708a1b05.cu
#include <backends/fvm_types.hpp> #include "detail.hpp" namespace arb { namespace gpu { namespace kernels { /// GPU implementatin of Hines matrix assembly /// Flat layout /// For a given time step size dt /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve template <typename T, typename I> __global__ void assemble_matrix_flat( T* d, T* rhs, const T* invariant_d, const T* voltage, const T* current, const T* cv_capacitance, const T* area, const I* cv_to_cell, const T* dt_cell, unsigned n) { const unsigned tid = threadIdx.x + blockDim.x*blockIdx.x; if (tid<n) { auto cid = cv_to_cell[tid]; auto dt = dt_cell[cid]; // Note: dt==0 case is expected only at the end of a mindelay/2 // integration period, and consequently divergence is unlikely // to be a peformance problem. if (dt>0) { // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units μS (micro-Siemens). // See the model documentation in docs/model for more information. T factor = 1e-3/dt; auto gi = factor * cv_capacitance[tid]; d[tid] = gi + invariant_d[tid]; rhs[tid] = gi*voltage[tid] - T(1e-3)*area[tid]*current[tid]; } else { d[tid] = 0; rhs[tid] = voltage[tid]; } } } /// GPU implementatin of Hines matrix assembly /// Interleaved layout /// For a given time step size dt /// - use the precomputed alpha and alpha_d values to construct the diagonal /// and off diagonal of the symmetric Hines matrix. /// - compute the RHS of the linear system to solve template <typename T, typename I, unsigned BlockWidth, unsigned LoadWidth, unsigned Threads> __global__ void assemble_matrix_interleaved( T* d, T* rhs, const T* invariant_d, const T* voltage, const T* current, const T* cv_capacitance, const T* area, const I* sizes, const I* starts, const I* matrix_to_cell, const T* dt_cell, unsigned padded_size, unsigned num_mtx) { static_assert(BlockWidth*LoadWidth==Threads, "number of threads must equal number of values to process per block"); __shared__ T buffer_v[Threads]; __shared__ T buffer_i[Threads]; const unsigned tid = threadIdx.x + blockIdx.x*blockDim.x; const unsigned lid = threadIdx.x; const unsigned mtx_id = tid/LoadWidth; const unsigned mtx_lane = tid - mtx_id*LoadWidth; const unsigned blk_id = tid/(BlockWidth*LoadWidth); const unsigned blk_row = lid/BlockWidth; const unsigned blk_lane = lid - blk_row*BlockWidth; const unsigned blk_pos = LoadWidth*blk_lane + blk_row; const bool do_load = mtx_id<num_mtx; unsigned load_pos = do_load? starts[mtx_id] + mtx_lane : 0; const unsigned end = do_load? starts[mtx_id] + sizes[mtx_id]: 0; unsigned store_pos = blk_id*BlockWidth*padded_size + (blk_row*BlockWidth + blk_lane); const unsigned max_size = sizes[0]; T factor = 0; T dt = 0; const unsigned permuted_cid = blk_id*BlockWidth + blk_lane; if (permuted_cid<num_mtx) { auto cid = matrix_to_cell[permuted_cid]; dt = dt_cell[cid]; // The 1e-3 is a constant of proportionality required to ensure that the // conductance (gi) values have units μS (micro-Siemens). // See the model documentation in docs/model for more information. factor = dt>0? 
1e-3/dt: 0; } for (unsigned j=0u; j<max_size; j+=LoadWidth) { if (do_load && load_pos<end) { buffer_v[lid] = voltage[load_pos]; buffer_i[lid] = current[load_pos]; } __syncthreads(); if (j+blk_row<padded_size) { const auto gi = factor * cv_capacitance[store_pos]; if (dt>0) { d[store_pos] = (gi + invariant_d[store_pos]); rhs[store_pos] = (gi*buffer_v[blk_pos] - T(1e-3)*area[store_pos]*buffer_i[blk_pos]); } else { d[store_pos] = 0; rhs[store_pos] = buffer_v[blk_pos]; } } __syncthreads(); store_pos += LoadWidth*BlockWidth; load_pos += LoadWidth; } } } // namespace kernels void assemble_matrix_flat( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_size_type* cv_to_cell, const fvm_value_type* dt_cell, unsigned n) { constexpr unsigned block_dim = 128; const unsigned grid_dim = impl::block_count(n, block_dim); kernels::assemble_matrix_flat <fvm_value_type, fvm_size_type> <<<grid_dim, block_dim>>> (d, rhs, invariant_d, voltage, current, cv_capacitance, area, cv_to_cell, dt_cell, n); } //template <typename T, typename I, unsigned BlockWidth, unsigned LoadWidth, unsigned Threads> void assemble_matrix_interleaved( fvm_value_type* d, fvm_value_type* rhs, const fvm_value_type* invariant_d, const fvm_value_type* voltage, const fvm_value_type* current, const fvm_value_type* cv_capacitance, const fvm_value_type* area, const fvm_size_type* sizes, const fvm_size_type* starts, const fvm_size_type* matrix_to_cell, const fvm_value_type* dt_cell, unsigned padded_size, unsigned num_mtx) { constexpr unsigned bd = impl::block_dim(); constexpr unsigned lw = impl::load_width(); constexpr unsigned block_dim = bd*lw; // The number of threads is threads_per_matrix*num_mtx const unsigned grid_dim = impl::block_count(num_mtx*lw, block_dim); kernels::assemble_matrix_interleaved <fvm_value_type, fvm_size_type, bd, lw, block_dim> <<<grid_dim, block_dim>>> (d, rhs, invariant_d, voltage, current, cv_capacitance, area, sizes, starts, matrix_to_cell, dt_cell, padded_size, num_mtx); } } // namespace gpu } // namespace arb
437523bee3dcdbcc459a9ecb619e03fc055b0a5e.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <chrono> #include <random> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/partition.h> #include "intersections.h" #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "kernel.h" #define GPU_MOTION 1 #define GPU_MEASUREMENT 1 #define GPU_MAP 1 #define GPU_RESAMPLE 1 #define LIDAR_ANGLE(i) (-135.0f + i * .25f) * PI / 180 #define LIDAR_SIZE 1081 #define PARTICLE_COUNT 5000 #define COV {0.015, 0.015, .015} #define EFFECTIVE_PARTICLES .7 #define MAP_TYPE char #define FREE_WEIGHT -1 #define OCCUPIED_WEIGHT 3 #define BLOCK_SIZE 128 #define ERRORCHECK 1 #define CLAMP(a, lo, hi) (a < lo) ? lo : (a > hi) ? hi : a #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; // host variables static MAP_TYPE *occupancyGrid = NULL; static glm::vec4 particles[PARTICLE_COUNT]; static glm::ivec2 map_dim; static Geom map_params; static glm::vec3 robotPos; // device variable static MAP_TYPE *dev_occupancyGrid = NULL; static glm::vec4 *dev_particles = NULL; static int *dev_fit = NULL; static float *dev_lidar = NULL; static float *dev_weights = NULL; static bool *dev_freeCells = NULL; static bool *dev_wallCells = NULL; // timers float avg_motion = 0.0f, avg_measurement = 0.0f, avg_map = 0.0f, avg_sample = 0.0f; void particleFilterInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), 
hipMemcpyHostToDevice); map_params = scene->geoms[0]; map_dim = glm::ivec2(map_params.scale.x / map_params.resolution.x, map_params.scale.y / map_params.resolution.y); occupancyGrid = new MAP_TYPE[map_dim.x*map_dim.y]; long max_val = 1 << (sizeof(MAP_TYPE) * 8 - 1); memset(occupancyGrid, -1*(max_val-1)*0, map_dim.x*map_dim.y*sizeof(MAP_TYPE)); //particles = new glm::vec4[PARTICLE_COUNT]; for (int i = 0; i < PARTICLE_COUNT; i++) { particles[i] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f); } robotPos = glm::vec3(0.0f); hipMalloc(&dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE)); hipMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), hipMemcpyHostToDevice); hipMalloc(&dev_particles, PARTICLE_COUNT * sizeof(glm::vec4)); hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyHostToDevice); hipMalloc((void**)&dev_fit, PARTICLE_COUNT * sizeof(int)); hipMalloc((void**)&dev_weights, PARTICLE_COUNT * sizeof(float)); hipMalloc((void**)&dev_lidar, LIDAR_SIZE * sizeof(float)); hipMalloc((void**)&dev_freeCells, map_dim.x * map_dim.y * sizeof(bool)); hipMalloc((void**)&dev_wallCells, map_dim.x * map_dim.y * sizeof(bool)); checkCUDAError("particleFilterInit"); } void particleFilterFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_geoms); hipFree(dev_occupancyGrid); hipFree(dev_particles); hipFree(dev_lidar); hipFree(dev_fit); hipFree(dev_weights); hipFree(dev_freeCells); hipFree(dev_wallCells); delete occupancyGrid; checkCUDAError("particleFilterFree"); } // Display the occupancy grid __global__ void drawMap(int nPixels, glm::vec3 * image, Geom *objects, Camera cam, MAP_TYPE *occupancyGrid, glm::vec3 scale, glm::vec3 res) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); Geom map = objects[0]; // convert pixel coordinates to map coordates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float) cam.resolution.y / 2.0f); float xc = (x - mid.x + cam.position.x) / zoom; float yc = (y - mid.y + cam.position.y) / zoom; // check if pixel is in the map if (xc < map.scale.x / 2 && xc > -map.scale.x / 2 && yc < map.scale.y / 2 && yc > -map.scale.y / 2) { glm::ivec2 idx( round(0.5f * scale.x / res.x + xc / res.x), round(0.5f * scale.y / res.y + yc / res.y) ); long max_val = 1 << (sizeof(MAP_TYPE)* 8 - 1); float val = ((float)(occupancyGrid[idx.x * (int)(scale.x / res.x) + idx.y] + max_val)) / (max_val*2); image[index] = glm::vec3(val); } else image[index] = glm::vec3(1.0f); } } // Display particles on screen __global__ void drawParticles(glm::vec3 * image, glm::vec4 *particles, Camera cam, glm::vec3 scale, glm::vec3 res) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { // convert map coordinates to pixel coordinates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float)cam.resolution.y / 2.0f); int x = particles[i].x * zoom + mid.x - cam.position.x; int y = particles[i].y * zoom + mid.y - cam.position.y; int l = cam.resolution.x; int index = x + (y * l); image[index] = glm::vec3(0.0f, 1.0f, 1.0f); } } // display a box for robot position void drawRobot(glm::vec3 * image, glm::vec3 robot, Camera cam, glm::vec3 scale, glm::vec3 res) { // convert map coordinates to pixel coordinates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float)cam.resolution.y / 2.0f); int x = robot.x * zoom + mid.x - 
cam.position.x; int y = robot.y * zoom + mid.y - cam.position.y; int l = cam.resolution.x; int index = x + (y * l); glm::vec3 color(1.0f, 0.0f, 0.0f); glm::vec3 row[3] = { color, color, color }; hipMemcpy(&image[index - 1], row, 3 * sizeof(glm::vec3), hipMemcpyHostToDevice); hipMemcpy(&image[index - 1 + l], row, 3 * sizeof(glm::vec3), hipMemcpyHostToDevice); hipMemcpy(&image[index - 1 - l], row, 3 * sizeof(glm::vec3), hipMemcpyHostToDevice); } // rotates generates 2d point for lidar reading __device__ __host__ void CleanLidarScan(int n, const float scan, const float theta, glm::vec2 &intersection) { float rot = LIDAR_ANGLE(n) + theta; intersection.x = scan * std::cos(rot); intersection.y = scan * std::sin(rot); } //Bresenham's line algorithm for integer grid __device__ __host__ void traceRay(glm::ivec2 start, glm::ivec2 end, int rowLen, bool *out){ glm::ivec2 delta = end - start; // swap for to the right octant bool steep = abs(delta.y) > abs(delta.x); if (steep) { // check slope int temp = start.x; start.x = start.y; start.y = temp; temp = end.x; end.x = end.y; end.y = temp; } if (start.x > end.x){ int temp = start.x; start.x = end.x; end.x = temp; temp = start.y; start.y = end.y; end.y = temp; } int deltax = end.x - start.x; int deltay = abs(end.y - start.y); float error = deltax / 2; int y = start.y; int ystep = (end.y > start.y) ? 1 : -1; // build line for (int x = start.x; x < end.x; x++){ if (steep) out[y*rowLen + x] = 1; else out[x*rowLen + y] = 1; error -= deltay; if (error < 0){ y += ystep; error += deltax; } } } // sum the value of specified points in a 2d map __device__ __host__ int mapCorrelation(int N, const MAP_TYPE *map, glm::ivec2 dim, const glm::vec2 *points) { int retv = 0; for (int i = 0; i < N; i++) { if (points[i].x >= 0 && points[i].x < dim.x && points[i].y >= 0 && points[i].y < dim.y) { int idx = (int)points[i].x * dim.x + (int)points[i].y; retv += map[idx]; } } return retv; } __device__ __host__ int EvaluateParticle(MAP_TYPE *map, glm::ivec2 map_dim, Geom map_params, glm::vec4 &particle, glm::vec3 pos, float *lidar) { // get walls relative to robot position, add particle position glm::vec2 walls[LIDAR_SIZE]; for (int j = 0; j < LIDAR_SIZE; j++) { CleanLidarScan(j, lidar[j], particle.z, walls[j]); walls[j].x += particle.x; walls[j].y += particle.y; // convert to grid idx walls[j].x = round(0.5f * map_params.scale.x / map_params.resolution.x + walls[j].x / map_params.resolution.x); walls[j].y = round(0.5f * map_params.scale.y / map_params.resolution.y + walls[j].y / map_params.resolution.y); } // test the map correlation between global map and walls return mapCorrelation(LIDAR_SIZE, map, map_dim, walls); } // kernel wrapper for calling Evaluate Particle __global__ void kernEvaluateParticles(MAP_TYPE *map, glm::ivec2 map_dim, Geom map_params, glm::vec4 *particles, glm::vec3 pos, float *lidar, int *fit) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { fit[i] = EvaluateParticle(map, map_dim, map_params, particles[i], pos, lidar); } } // simple inplace multiplication kernel __global__ void kernUpdateWeights(int N, glm::vec4 *a, int *b, float c, int min) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { a[i].w = a[i].w * ((float) b[i] - min) * c; } } // update particle cloud weights from measurement glm::vec3 PFMeasurementUpdate(std::vector<float> lidar) { glm::vec3 retv(0.0f); if (GPU_MEASUREMENT) { // 1D block for particles const int blockSize1d = 128; const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d); 
// create device copy of fit array and lidar hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice); hipMemset(dev_fit, 0, PARTICLE_COUNT * sizeof(int)); hipDeviceSynchronize(); kernEvaluateParticles << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fit); hipDeviceSynchronize(); checkCUDAError("particle measurement update error"); thrust::device_vector<int> vFit(dev_fit, dev_fit + PARTICLE_COUNT); thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end()); int rng = *result.second - *result.first; int best = result.second - vFit.begin(); // rescale all weights if (rng > 0) { float f = 1 / (float)(rng); kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fit, f, *result.first); hipDeviceSynchronize(); checkCUDAError("particle weight update error"); } // only use best point for return hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyDeviceToHost); retv = (glm::vec3) particles[best]; } else { int best = -128 * LIDAR_SIZE; int worst = 128 * LIDAR_SIZE; int iBest = 0; int fit[PARTICLE_COUNT] = { 0 }; for (int i = 0; i < PARTICLE_COUNT; i++) { fit[i] = EvaluateParticle(occupancyGrid, map_dim, map_params, particles[i], robotPos, &lidar[0]); // track correlation maximums if (fit[i] > best) { best = fit[i]; iBest = i; } if (fit[i] < worst) worst = fit[i]; } // rescale all weights if ((best - worst) > 0) { float f = 1.0f; for (int i = 0; i < PARTICLE_COUNT; i++) { f = (float)(fit[i] - worst) / (float)(best - worst); particles[i].w *= f; } } retv = (glm::vec3) particles[iBest]; } return retv; } // add noise to a single particle __device__ __host__ void ParticleAddNoise(glm::vec4 &particle, int frame, int idx) { float mean[3] = { 0 }; float cov[3] = COV; // covariance: x y theta thrust::default_random_engine e2 = makeSeededRandomEngine(frame, idx, 0); thrust::random::normal_distribution<float> distx(mean[0], cov[0]); thrust::random::normal_distribution<float> disty(mean[1], cov[1]); thrust::random::normal_distribution<float> distt(mean[2], cov[2]); glm::vec4 noise(distx(e2), disty(e2), distt(e2), 0.0f); particle += noise; } // kernel wrapper for adding noise to a particle __global__ void kernAddNoise(glm::vec4 *particles, int frame) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { ParticleAddNoise(particles[i], frame, i); } } // perform a motion update on the particle cloud, adding in gaussian noise void PFMotionUpdate(int frame) { if (GPU_MOTION) { // 1D block for particles const int blockSize1d = 128; const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d); // sync up host and device arrays for now... hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyHostToDevice); kernAddNoise << <blocksPerGrid1d, blockSize1d >> >(dev_particles, frame); hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyDeviceToHost); hipDeviceSynchronize(); checkCUDAError("particle motion update error"); } else { for (int i = 0; i < PARTICLE_COUNT; i++) ParticleAddNoise(particles[i], frame, i); } } __global__ void kernCopyWeights(glm::vec4 *particles, float *weights, bool squared) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { weights[i] = (squared) ? 
particles[i].w * particles[i].w : particles[i].w; } } __global__ void kernWeightedSample(glm::vec4 *particles, float *weights, float max, float Neff, int frame) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, i); thrust::random::uniform_real_distribution<float> dist(0, max); int idx = 0; float rnd = dist(gen); while (idx < PARTICLE_COUNT && rnd > weights[idx]) idx++; particles[i] = particles[idx]; particles[i].w = 1.0f; } } // check if particles need to be resampled void PFResample(int frame) { // 1D block for particles const int blockSize1d = 128; const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d); float r = 0, r2 = 0; if (GPU_RESAMPLE) { kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, true); hipDeviceSynchronize(); thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights); r2 = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT); kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, false); hipDeviceSynchronize(); r = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT); } else { for (int i = 0; i < PARTICLE_COUNT; i++) { r += particles[i].w; r2 += (particles[i].w) * (particles[i].w); } } float Neff = r * r / r2; if (Neff < EFFECTIVE_PARTICLES*PARTICLE_COUNT) { if (GPU_RESAMPLE) { thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights); thrust::inclusive_scan(pWeights, pWeights + PARTICLE_COUNT, pWeights); float max; hipMemcpy(&max, &dev_weights[PARTICLE_COUNT - 1], sizeof(float), hipMemcpyDeviceToHost); kernWeightedSample << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, max, Neff, frame); hipMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyDeviceToHost); checkCUDAError("resample error"); } else { float weightsum[PARTICLE_COUNT]; weightsum[0] = particles[0].w; for (int i = 1; i < PARTICLE_COUNT; i++) { weightsum[i] = weightsum[i - 1] + particles[i].w; } thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, 0); thrust::random::uniform_real_distribution<float> dist(0, weightsum[PARTICLE_COUNT - 1]); for (int i = 0; i < PARTICLE_COUNT; i++) { int idx = 0; float rnd = dist(gen); while (idx < PARTICLE_COUNT && rnd > weightsum[idx]) idx++; particles[i] = particles[idx]; particles[i].w = 1.0f; } } } // push particles to GPU to draw hipMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), hipMemcpyHostToDevice); } __global__ void kernUpdateMap(int N, MAP_TYPE *map, bool *mask, int val) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15; if (mask[i]) map[i] = CLAMP(map[i] + val, -clamp_val, clamp_val); } } __global__ void kernGetWalls(float *lidar, glm::ivec2 center, float theta, bool *freeCells, bool *wallCells, glm::ivec2 map_dim, Geom map_params) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < LIDAR_SIZE) { glm::vec2 walls; CleanLidarScan(i, lidar[i], theta, walls); walls.x = round(walls.x / map_params.resolution.x); walls.y = round(walls.y / map_params.resolution.y); walls += center; if (walls.x >= 0 && walls.x < map_dim.x && walls.y >= 0 && walls.y < map_dim.y) { traceRay(center, walls, map_dim.x, freeCells); wallCells[(int) (walls.x * map_dim.x + walls.y)] = true; } } } void PFUpdateMap(std::vector<float> lidar) { glm::ivec2 center_idx( round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x + 
map_params.resolution.x / 2), round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2) ); long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15; if (GPU_MAP) { // 1D block for particles const int blockSize1d = 128; const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d); const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d); // find occupancy grid cells from translated lidar hipMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool)); hipMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool)); hipMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), hipMemcpyHostToDevice); // find intersections from lidar scan kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params); // Update free/occupied weights kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, FREE_WEIGHT); kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_wallCells, OCCUPIED_WEIGHT); } else { // find occupancy grid cells from translated lidar bool *freeCells = new bool[map_dim.x * map_dim.y]; memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool)); // find intersections from lidar scan glm::vec2 walls[LIDAR_SIZE]; for (int i = 0; i < LIDAR_SIZE; i++) { CleanLidarScan(i, lidar[i], robotPos.z, walls[i]); walls[i].x = round(walls[i].x / map_params.resolution.x); walls[i].y = round(walls[i].y / map_params.resolution.y); walls[i] += center_idx; if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) { traceRay(center_idx, walls[i], map_dim.x, freeCells); } } // downweight free cells for (int i = 0; i < map_dim.x; i++) { for (int j = 0; j < map_dim.y; j++) { int idx = i*map_dim.x + j; if (freeCells[idx]) { occupancyGrid[idx] += FREE_WEIGHT; occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val); } } } // upweight occupied cells for (int i = 0; i < LIDAR_SIZE; i++) { if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) { int idx = (int)walls[i].x * map_dim.x + (int)walls[i].y; occupancyGrid[idx] += OCCUPIED_WEIGHT; occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val); } } // push grid to GPU to draw hipMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(char), hipMemcpyHostToDevice); delete freeCells; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void particleFilter(uchar4 *pbo, int frame, Lidar *lidar) { if (frame % 100 == 0) { avg_motion = 0.0f; avg_measurement = 0.0f; avg_map = 0.0f; avg_sample = 0.0f; } std::chrono::time_point<std::chrono::system_clock> start, end; start = std::chrono::system_clock::now(); PFMotionUpdate(frame); end = std::chrono::system_clock::now(); avg_motion += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count(); start = end; robotPos = PFMeasurementUpdate(lidar->scans[frame]); end = std::chrono::system_clock::now(); avg_measurement += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count(); start = end; PFUpdateMap(lidar->scans[frame]); end = std::chrono::system_clock::now(); avg_map += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count(); start = end; PFResample(frame); end = std::chrono::system_clock::now(); avg_sample += 
(std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count(); start = end; if (frame % 100 == -1) { cout << "Frame " << frame << ":" << endl; printf(" motion: %3.2f\n", avg_motion / 100.0f); printf(" measurement: %3.2f\n", avg_measurement / 100.0f); printf(" map: %3.2f\n", avg_map / 100.0f); printf(" resample: %3.2f\n", avg_sample / 100.0f); } } void drawMap(uchar4 *pbo) { const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating pixels in camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); const int blockSize1d = 128; const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d); drawMap << <blocksPerGrid2d, blockSize2d >> >(pixelcount, dev_image, dev_geoms, cam, dev_occupancyGrid, map_params.scale, map_params.resolution); drawParticles << <blocksPerGrid1d, blockSize1d >> > (dev_image, dev_particles, cam, map_params.scale, map_params.resolution); drawRobot(dev_image, robotPos, cam, map_params.scale, map_params.resolution); checkCUDAError("draw screen error"); // Send results to OpenGL buffer for rendering sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, 1, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); }
437523bee3dcdbcc459a9ecb619e03fc055b0a5e.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <chrono> #include <random> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/partition.h> #include "intersections.h" #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "kernel.h" #define GPU_MOTION 1 #define GPU_MEASUREMENT 1 #define GPU_MAP 1 #define GPU_RESAMPLE 1 #define LIDAR_ANGLE(i) (-135.0f + i * .25f) * PI / 180 #define LIDAR_SIZE 1081 #define PARTICLE_COUNT 5000 #define COV {0.015, 0.015, .015} #define EFFECTIVE_PARTICLES .7 #define MAP_TYPE char #define FREE_WEIGHT -1 #define OCCUPIED_WEIGHT 3 #define BLOCK_SIZE 128 #define ERRORCHECK 1 #define CLAMP(a, lo, hi) (a < lo) ? lo : (a > hi) ? hi : a #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; // host variables static MAP_TYPE *occupancyGrid = NULL; static glm::vec4 particles[PARTICLE_COUNT]; static glm::ivec2 map_dim; static Geom map_params; static glm::vec3 robotPos; // device variable static MAP_TYPE *dev_occupancyGrid = NULL; static glm::vec4 *dev_particles = NULL; static int *dev_fit = NULL; static float *dev_lidar = NULL; static float *dev_weights = NULL; static bool *dev_freeCells = NULL; static bool *dev_wallCells = NULL; // timers float avg_motion = 0.0f, avg_measurement = 0.0f, avg_map = 0.0f, avg_sample = 0.0f; void particleFilterInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); map_params = scene->geoms[0]; map_dim = 
glm::ivec2(map_params.scale.x / map_params.resolution.x, map_params.scale.y / map_params.resolution.y); occupancyGrid = new MAP_TYPE[map_dim.x*map_dim.y]; long max_val = 1 << (sizeof(MAP_TYPE) * 8 - 1); memset(occupancyGrid, -1*(max_val-1)*0, map_dim.x*map_dim.y*sizeof(MAP_TYPE)); //particles = new glm::vec4[PARTICLE_COUNT]; for (int i = 0; i < PARTICLE_COUNT; i++) { particles[i] = glm::vec4(0.0f, 0.0f, 0.0f, 1.0f); } robotPos = glm::vec3(0.0f); cudaMalloc(&dev_occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE)); cudaMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), cudaMemcpyHostToDevice); cudaMalloc(&dev_particles, PARTICLE_COUNT * sizeof(glm::vec4)); cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_fit, PARTICLE_COUNT * sizeof(int)); cudaMalloc((void**)&dev_weights, PARTICLE_COUNT * sizeof(float)); cudaMalloc((void**)&dev_lidar, LIDAR_SIZE * sizeof(float)); cudaMalloc((void**)&dev_freeCells, map_dim.x * map_dim.y * sizeof(bool)); cudaMalloc((void**)&dev_wallCells, map_dim.x * map_dim.y * sizeof(bool)); checkCUDAError("particleFilterInit"); } void particleFilterFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_geoms); cudaFree(dev_occupancyGrid); cudaFree(dev_particles); cudaFree(dev_lidar); cudaFree(dev_fit); cudaFree(dev_weights); cudaFree(dev_freeCells); cudaFree(dev_wallCells); delete occupancyGrid; checkCUDAError("particleFilterFree"); } // Display the occupancy grid __global__ void drawMap(int nPixels, glm::vec3 * image, Geom *objects, Camera cam, MAP_TYPE *occupancyGrid, glm::vec3 scale, glm::vec3 res) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); Geom map = objects[0]; // convert pixel coordinates to map coordates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float) cam.resolution.y / 2.0f); float xc = (x - mid.x + cam.position.x) / zoom; float yc = (y - mid.y + cam.position.y) / zoom; // check if pixel is in the map if (xc < map.scale.x / 2 && xc > -map.scale.x / 2 && yc < map.scale.y / 2 && yc > -map.scale.y / 2) { glm::ivec2 idx( round(0.5f * scale.x / res.x + xc / res.x), round(0.5f * scale.y / res.y + yc / res.y) ); long max_val = 1 << (sizeof(MAP_TYPE)* 8 - 1); float val = ((float)(occupancyGrid[idx.x * (int)(scale.x / res.x) + idx.y] + max_val)) / (max_val*2); image[index] = glm::vec3(val); } else image[index] = glm::vec3(1.0f); } } // Display particles on screen __global__ void drawParticles(glm::vec3 * image, glm::vec4 *particles, Camera cam, glm::vec3 scale, glm::vec3 res) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { // convert map coordinates to pixel coordinates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float)cam.resolution.y / 2.0f); int x = particles[i].x * zoom + mid.x - cam.position.x; int y = particles[i].y * zoom + mid.y - cam.position.y; int l = cam.resolution.x; int index = x + (y * l); image[index] = glm::vec3(0.0f, 1.0f, 1.0f); } } // display a box for robot position void drawRobot(glm::vec3 * image, glm::vec3 robot, Camera cam, glm::vec3 scale, glm::vec3 res) { // convert map coordinates to pixel coordinates float zoom = cam.position.z; glm::vec2 mid((float)cam.resolution.x / 2.0f, (float)cam.resolution.y / 2.0f); int x = robot.x * zoom + mid.x - cam.position.x; int y = robot.y * zoom + mid.y 
- cam.position.y; int l = cam.resolution.x; int index = x + (y * l); glm::vec3 color(1.0f, 0.0f, 0.0f); glm::vec3 row[3] = { color, color, color }; cudaMemcpy(&image[index - 1], row, 3 * sizeof(glm::vec3), cudaMemcpyHostToDevice); cudaMemcpy(&image[index - 1 + l], row, 3 * sizeof(glm::vec3), cudaMemcpyHostToDevice); cudaMemcpy(&image[index - 1 - l], row, 3 * sizeof(glm::vec3), cudaMemcpyHostToDevice); } // rotates generates 2d point for lidar reading __device__ __host__ void CleanLidarScan(int n, const float scan, const float theta, glm::vec2 &intersection) { float rot = LIDAR_ANGLE(n) + theta; intersection.x = scan * std::cos(rot); intersection.y = scan * std::sin(rot); } //Bresenham's line algorithm for integer grid __device__ __host__ void traceRay(glm::ivec2 start, glm::ivec2 end, int rowLen, bool *out){ glm::ivec2 delta = end - start; // swap for to the right octant bool steep = abs(delta.y) > abs(delta.x); if (steep) { // check slope int temp = start.x; start.x = start.y; start.y = temp; temp = end.x; end.x = end.y; end.y = temp; } if (start.x > end.x){ int temp = start.x; start.x = end.x; end.x = temp; temp = start.y; start.y = end.y; end.y = temp; } int deltax = end.x - start.x; int deltay = abs(end.y - start.y); float error = deltax / 2; int y = start.y; int ystep = (end.y > start.y) ? 1 : -1; // build line for (int x = start.x; x < end.x; x++){ if (steep) out[y*rowLen + x] = 1; else out[x*rowLen + y] = 1; error -= deltay; if (error < 0){ y += ystep; error += deltax; } } } // sum the value of specified points in a 2d map __device__ __host__ int mapCorrelation(int N, const MAP_TYPE *map, glm::ivec2 dim, const glm::vec2 *points) { int retv = 0; for (int i = 0; i < N; i++) { if (points[i].x >= 0 && points[i].x < dim.x && points[i].y >= 0 && points[i].y < dim.y) { int idx = (int)points[i].x * dim.x + (int)points[i].y; retv += map[idx]; } } return retv; } __device__ __host__ int EvaluateParticle(MAP_TYPE *map, glm::ivec2 map_dim, Geom map_params, glm::vec4 &particle, glm::vec3 pos, float *lidar) { // get walls relative to robot position, add particle position glm::vec2 walls[LIDAR_SIZE]; for (int j = 0; j < LIDAR_SIZE; j++) { CleanLidarScan(j, lidar[j], particle.z, walls[j]); walls[j].x += particle.x; walls[j].y += particle.y; // convert to grid idx walls[j].x = round(0.5f * map_params.scale.x / map_params.resolution.x + walls[j].x / map_params.resolution.x); walls[j].y = round(0.5f * map_params.scale.y / map_params.resolution.y + walls[j].y / map_params.resolution.y); } // test the map correlation between global map and walls return mapCorrelation(LIDAR_SIZE, map, map_dim, walls); } // kernel wrapper for calling Evaluate Particle __global__ void kernEvaluateParticles(MAP_TYPE *map, glm::ivec2 map_dim, Geom map_params, glm::vec4 *particles, glm::vec3 pos, float *lidar, int *fit) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < PARTICLE_COUNT) { fit[i] = EvaluateParticle(map, map_dim, map_params, particles[i], pos, lidar); } } // simple inplace multiplication kernel __global__ void kernUpdateWeights(int N, glm::vec4 *a, int *b, float c, int min) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; if (i < N) { a[i].w = a[i].w * ((float) b[i] - min) * c; } } // update particle cloud weights from measurement glm::vec3 PFMeasurementUpdate(std::vector<float> lidar) { glm::vec3 retv(0.0f); if (GPU_MEASUREMENT) { // 1D block for particles const int blockSize1d = 128; const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d); // create device copy of fit array and 
        cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemset(dev_fit, 0, PARTICLE_COUNT * sizeof(int));
        cudaDeviceSynchronize();

        kernEvaluateParticles << <blocksPerGrid1d, blockSize1d >> >(dev_occupancyGrid, map_dim, map_params, dev_particles, robotPos, dev_lidar, dev_fit);
        cudaDeviceSynchronize();
        checkCUDAError("particle measurement update error");

        thrust::device_vector<int> vFit(dev_fit, dev_fit + PARTICLE_COUNT);
        thrust::pair<thrust::device_vector<int>::iterator, thrust::device_vector<int>::iterator> result = thrust::minmax_element(vFit.begin(), vFit.end());
        int rng = *result.second - *result.first;
        int best = result.second - vFit.begin();

        // rescale all weights
        if (rng > 0) {
            float f = 1 / (float)(rng);
            kernUpdateWeights << <blocksPerGrid1d, blockSize1d >> >(PARTICLE_COUNT, dev_particles, dev_fit, f, *result.first);
            cudaDeviceSynchronize();
            checkCUDAError("particle weight update error");
        }

        // only use best point for return
        cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyDeviceToHost);
        retv = (glm::vec3) particles[best];
    }
    else {
        int best = -128 * LIDAR_SIZE;
        int worst = 128 * LIDAR_SIZE;
        int iBest = 0;

        int fit[PARTICLE_COUNT] = { 0 };

        for (int i = 0; i < PARTICLE_COUNT; i++) {
            fit[i] = EvaluateParticle(occupancyGrid, map_dim, map_params, particles[i], robotPos, &lidar[0]);

            // track correlation maximums
            if (fit[i] > best) {
                best = fit[i];
                iBest = i;
            }

            if (fit[i] < worst)
                worst = fit[i];
        }

        // rescale all weights
        if ((best - worst) > 0) {
            float f = 1.0f;
            for (int i = 0; i < PARTICLE_COUNT; i++) {
                f = (float)(fit[i] - worst) / (float)(best - worst);
                particles[i].w *= f;
            }
        }

        retv = (glm::vec3) particles[iBest];
    }

    return retv;
}

// add noise to a single particle
__device__ __host__ void ParticleAddNoise(glm::vec4 &particle, int frame, int idx)
{
    float mean[3] = { 0 };
    float cov[3] = COV;  // covariance: x y theta

    thrust::default_random_engine e2 = makeSeededRandomEngine(frame, idx, 0);
    thrust::random::normal_distribution<float> distx(mean[0], cov[0]);
    thrust::random::normal_distribution<float> disty(mean[1], cov[1]);
    thrust::random::normal_distribution<float> distt(mean[2], cov[2]);

    glm::vec4 noise(distx(e2), disty(e2), distt(e2), 0.0f);
    particle += noise;
}

// kernel wrapper for adding noise to a particle
__global__ void kernAddNoise(glm::vec4 *particles, int frame)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (i < PARTICLE_COUNT) {
        ParticleAddNoise(particles[i], frame, i);
    }
}

// perform a motion update on the particle cloud, adding in gaussian noise
void PFMotionUpdate(int frame) {

    if (GPU_MOTION) {
        // 1D block for particles
        const int blockSize1d = 128;
        const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);

        // sync up host and device arrays for now...
        cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyHostToDevice);
        kernAddNoise << <blocksPerGrid1d, blockSize1d >> >(dev_particles, frame);
        cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();

        checkCUDAError("particle motion update error");
    }
    else {
        for (int i = 0; i < PARTICLE_COUNT; i++)
            ParticleAddNoise(particles[i], frame, i);
    }
}

__global__ void kernCopyWeights(glm::vec4 *particles, float *weights, bool squared)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (i < PARTICLE_COUNT) {
        weights[i] = (squared) ? particles[i].w * particles[i].w : particles[i].w;
    }
}

__global__ void kernWeightedSample(glm::vec4 *particles, float *weights, float max, float Neff, int frame)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (i < PARTICLE_COUNT) {
        thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, i);
        thrust::random::uniform_real_distribution<float> dist(0, max);

        int idx = 0;
        float rnd = dist(gen);
        while (idx < PARTICLE_COUNT && rnd > weights[idx]) idx++;

        particles[i] = particles[idx];
        particles[i].w = 1.0f;
    }
}

// check if particles need to be resampled
void PFResample(int frame) {
    // 1D block for particles
    const int blockSize1d = 128;
    const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);

    float r = 0, r2 = 0;

    if (GPU_RESAMPLE) {
        kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, true);
        cudaDeviceSynchronize();
        thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
        r2 = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);

        kernCopyWeights << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, false);
        cudaDeviceSynchronize();
        r = thrust::reduce(pWeights, pWeights + PARTICLE_COUNT);
    }
    else {
        for (int i = 0; i < PARTICLE_COUNT; i++) {
            r += particles[i].w;
            r2 += (particles[i].w) * (particles[i].w);
        }
    }

    float Neff = r * r / r2;

    if (Neff < EFFECTIVE_PARTICLES*PARTICLE_COUNT) {

        if (GPU_RESAMPLE) {
            thrust::device_ptr<float> pWeights = thrust::device_pointer_cast(dev_weights);
            thrust::inclusive_scan(pWeights, pWeights + PARTICLE_COUNT, pWeights);

            float max;
            cudaMemcpy(&max, &dev_weights[PARTICLE_COUNT - 1], sizeof(float), cudaMemcpyDeviceToHost);
            kernWeightedSample << <blocksPerGrid1d, blockSize1d >> >(dev_particles, dev_weights, max, Neff, frame);
            cudaMemcpy(particles, dev_particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyDeviceToHost);

            checkCUDAError("resample error");
        }
        else {
            float weightsum[PARTICLE_COUNT];
            weightsum[0] = particles[0].w;
            for (int i = 1; i < PARTICLE_COUNT; i++) {
                weightsum[i] = weightsum[i - 1] + particles[i].w;
            }

            thrust::default_random_engine gen = makeSeededRandomEngine(Neff, frame, 0);
            thrust::random::uniform_real_distribution<float> dist(0, weightsum[PARTICLE_COUNT - 1]);

            for (int i = 0; i < PARTICLE_COUNT; i++) {
                int idx = 0;
                float rnd = dist(gen);
                while (idx < PARTICLE_COUNT && rnd > weightsum[idx]) idx++;

                particles[i] = particles[idx];
                particles[i].w = 1.0f;
            }
        }
    }

    // push particles to GPU to draw
    cudaMemcpy(dev_particles, particles, PARTICLE_COUNT * sizeof(glm::vec4), cudaMemcpyHostToDevice);
}

__global__ void kernUpdateMap(int N, MAP_TYPE *map, bool *mask, int val)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (i < N) {
        long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;
        if (mask[i])
            map[i] = CLAMP(map[i] + val, -clamp_val, clamp_val);
    }
}

__global__ void kernGetWalls(float *lidar, glm::ivec2 center, float theta, bool *freeCells, bool *wallCells, glm::ivec2 map_dim, Geom map_params)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;

    if (i < LIDAR_SIZE) {
        glm::vec2 walls;

        CleanLidarScan(i, lidar[i], theta, walls);
        walls.x = round(walls.x / map_params.resolution.x);
        walls.y = round(walls.y / map_params.resolution.y);
        walls += center;

        if (walls.x >= 0 && walls.x < map_dim.x && walls.y >= 0 && walls.y < map_dim.y) {
            traceRay(center, walls, map_dim.x, freeCells);
            wallCells[(int) (walls.x * map_dim.x + walls.y)] = true;
        }
    }
}

void PFUpdateMap(std::vector<float> lidar) {
    glm::ivec2 center_idx(
        round(0.5f * map_dim.x + robotPos.x / map_params.resolution.x +
              map_params.resolution.x / 2),
        round(0.5f * map_dim.y + robotPos.y / map_params.resolution.y + map_params.resolution.y / 2)
    );

    long clamp_val = (1 << (sizeof(MAP_TYPE)* 8 - 1)) - 15;

    if (GPU_MAP) {
        // 1D block for particles
        const int blockSize1d = 128;
        const dim3 blocksPerGridLidar((LIDAR_SIZE + blockSize1d - 1) / blockSize1d);
        const dim3 blocksPerGridMap((map_dim.x * map_dim.y + blockSize1d - 1) / blockSize1d);

        // find occupancy grid cells from translated lidar
        cudaMemset(dev_freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));
        cudaMemset(dev_wallCells, 0, map_dim.x * map_dim.y*sizeof(bool));
        cudaMemcpy(dev_lidar, &lidar[0], LIDAR_SIZE * sizeof(float), cudaMemcpyHostToDevice);

        // find intersections from lidar scan
        kernGetWalls << <blocksPerGridLidar, blockSize1d >> >(dev_lidar, center_idx, robotPos.z, dev_freeCells, dev_wallCells, map_dim, map_params);

        // Update free/occupied weights
        kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_freeCells, FREE_WEIGHT);
        kernUpdateMap << <blocksPerGridMap, blockSize1d >> >(map_dim.x * map_dim.y, dev_occupancyGrid, dev_wallCells, OCCUPIED_WEIGHT);
    }
    else {
        // find occupancy grid cells from translated lidar
        bool *freeCells = new bool[map_dim.x * map_dim.y];
        memset(freeCells, 0, map_dim.x * map_dim.y*sizeof(bool));

        // find intersections from lidar scan
        glm::vec2 walls[LIDAR_SIZE];
        for (int i = 0; i < LIDAR_SIZE; i++) {
            CleanLidarScan(i, lidar[i], robotPos.z, walls[i]);
            walls[i].x = round(walls[i].x / map_params.resolution.x);
            walls[i].y = round(walls[i].y / map_params.resolution.y);
            walls[i] += center_idx;

            if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
                traceRay(center_idx, walls[i], map_dim.x, freeCells);
            }
        }

        // downweight free cells
        for (int i = 0; i < map_dim.x; i++) {
            for (int j = 0; j < map_dim.y; j++) {
                int idx = i*map_dim.x + j;
                if (freeCells[idx]) {
                    occupancyGrid[idx] += FREE_WEIGHT;
                    occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
                }
            }
        }

        // upweight occupied cells
        for (int i = 0; i < LIDAR_SIZE; i++) {
            if (walls[i].x >= 0 && walls[i].x < map_dim.x && walls[i].y >= 0 && walls[i].y < map_dim.y) {
                int idx = (int)walls[i].x * map_dim.x + (int)walls[i].y;
                occupancyGrid[idx] += OCCUPIED_WEIGHT;
                occupancyGrid[idx] = CLAMP(occupancyGrid[idx], -clamp_val, clamp_val);
            }
        }

        // push grid to GPU to draw
        cudaMemcpy(dev_occupancyGrid, occupancyGrid, map_dim.x*map_dim.y * sizeof(MAP_TYPE), cudaMemcpyHostToDevice);

        delete[] freeCells;
    }
}

/**
 * Wrapper for the __global__ call that sets up the kernel calls and does a ton
 * of memory management
 */
void particleFilter(uchar4 *pbo, int frame, Lidar *lidar) {
    if (frame % 100 == 0) {
        avg_motion = 0.0f;
        avg_measurement = 0.0f;
        avg_map = 0.0f;
        avg_sample = 0.0f;
    }

    std::chrono::time_point<std::chrono::system_clock> start, end;
    start = std::chrono::system_clock::now();

    PFMotionUpdate(frame);
    end = std::chrono::system_clock::now();
    avg_motion += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
    start = end;

    robotPos = PFMeasurementUpdate(lidar->scans[frame]);
    end = std::chrono::system_clock::now();
    avg_measurement += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
    start = end;

    PFUpdateMap(lidar->scans[frame]);
    end = std::chrono::system_clock::now();
    avg_map += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
    start = end;

    PFResample(frame);
    end = std::chrono::system_clock::now();
    avg_sample += (std::chrono::duration_cast<std::chrono::microseconds> (end - start)).count();
    start = end;

    if (frame % 100 == -1) {  // never true: the timing printout below is currently disabled
        cout << "Frame " << frame << ":" << endl;
        printf(" motion: %3.2f\n", avg_motion / 100.0f);
        printf(" measurement: %3.2f\n", avg_measurement / 100.0f);
        printf(" map: %3.2f\n", avg_map / 100.0f);
        printf(" resample: %3.2f\n", avg_sample / 100.0f);
    }
}

void drawMap(uchar4 *pbo) {
    const Camera &cam = hst_scene->state.camera;
    const int pixelcount = cam.resolution.x * cam.resolution.y;

    // 2D block for generating pixels in camera
    const dim3 blockSize2d(8, 8);
    const dim3 blocksPerGrid2d(
        (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x,
        (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y);

    const int blockSize1d = 128;
    const dim3 blocksPerGrid1d((PARTICLE_COUNT + blockSize1d - 1) / blockSize1d);

    drawMap << <blocksPerGrid2d, blockSize2d >> >(pixelcount, dev_image, dev_geoms, cam, dev_occupancyGrid, map_params.scale, map_params.resolution);
    drawParticles << <blocksPerGrid1d, blockSize1d >> > (dev_image, dev_particles, cam, map_params.scale, map_params.resolution);
    drawRobot(dev_image, robotPos, cam, map_params.scale, map_params.resolution);
    checkCUDAError("draw screen error");

    // Send results to OpenGL buffer for rendering
    sendImageToPBO << <blocksPerGrid2d, blockSize2d >> >(pbo, cam.resolution, 1, dev_image);

    // Retrieve image from GPU
    cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost);
}
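The PFResample routine above gates resampling on the effective sample size Neff = (sum w)^2 / (sum w^2) and, when it triggers, redraws particles from the cumulative weight sum. The standalone host-side sketch below (plain C++; the function name resampleIfNeeded and the threshold argument are illustrative and not part of the file above) isolates that criterion so it can be checked outside the full pipeline:

#include <cstdio>
#include <cstdlib>
#include <vector>

// Resample only when the effective sample size drops below threshold * N,
// using the same "cumulative weights + uniform draw" idea as kernWeightedSample.
static void resampleIfNeeded(std::vector<float> &weights, float threshold) {
    float sum = 0.0f, sumSq = 0.0f;
    for (float w : weights) { sum += w; sumSq += w * w; }

    float Neff = (sum * sum) / sumSq;                  // effective sample size
    if (Neff >= threshold * weights.size()) return;    // cloud still healthy, keep weights

    // inclusive scan of the weights
    std::vector<float> cdf(weights.size());
    float running = 0.0f;
    for (size_t i = 0; i < weights.size(); i++) { running += weights[i]; cdf[i] = running; }

    // multinomial resampling: each slot picks the first index whose cdf exceeds a uniform draw
    for (size_t i = 0; i < weights.size(); i++) {
        float r = cdf.back() * (float)rand() / (float)RAND_MAX;
        size_t idx = 0;
        while (idx + 1 < cdf.size() && r > cdf[idx]) idx++;
        (void)idx;  // a real filter would copy particle state idx into slot i here
    }
    std::fill(weights.begin(), weights.end(), 1.0f);   // weights reset to 1 after resampling
}

int main() {
    std::vector<float> w = { 0.9f, 0.05f, 0.03f, 0.02f };  // one dominant particle
    resampleIfNeeded(w, 0.5f);   // the threshold plays the role of EFFECTIVE_PARTICLES
    printf("weights after resampling: %.2f %.2f %.2f %.2f\n", w[0], w[1], w[2], w[3]);
    return 0;
}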
a1008a3a3b588440c0a0085a27e347dd97958a42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::add(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::add() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD); layers.push_back(ele); return ele; } Tensor FFModel::subtract(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::subtract() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB); layers.push_back(ele); return ele; } Tensor FFModel::multiply(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::multiply() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL); layers.push_back(ele); return ele; } Tensor FFModel::divide(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::divide() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV); layers.push_back(ele); return ele; } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type, const Tensor& in1, const Tensor& in2) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), in1, in2), op_type(_op_type) { //TODO: implement broadcast op assert(in1.numDim == in2.numDim); int dim = in1.numDim; outputs[0].numDim = in1.numDim; for (int i = 0; i < dim; i++) { assert(in1.adim[i] == in2.adim[i]); outputs[0].adim[i] = in1.adim[i]; } } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), 2), op_type(_op_type) { } Tensor ElementBinary::init_inout(FFModel& model, const Tensor& input) { // TODO: currently disable this functional API since // FlexFlow assumes a single tensor as input assert(false); Tensor in1 = input, in2 = input; inputs[0] = in1; inputs[1] = in2; create_output_and_partition(model); return outputs[0]; } /* void ElementBinary::add_to_model(FFModel& model) { model.layers.push_back(this); } */ void ElementBinary::create_weights(FFModel& model) { // Do nothing } void ElementBinary::create_output_and_partition(FFModel& model) { //TODO: implement broadcast op assert(inputs[0].numDim == inputs[1].numDim); int dim = inputs[0].numDim; for (int i = 0; i < dim; i++) assert(inputs[0].adim[i] == inputs[1].adim[i]); switch (dim) { case 1: { task_is = model.get_or_create_task_is(1, name); create_output_and_partition_with_dim<1>(model); break; } case 2: { task_is = model.get_or_create_task_is(2, 
name); create_output_and_partition_with_dim<2>(model); break; } case 3: { task_is = model.get_or_create_task_is(3, name); create_output_and_partition_with_dim<3>(model); break; } case 4: { task_is = model.get_or_create_task_is(4, name); create_output_and_partition_with_dim<4>(model); break; } default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void ElementBinary::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; for (int i = 0; i < 2; i++) { input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]); } } } __host__ void ElementBinary::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) {} void ElementBinary::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_forward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* in1, const float* in2, float* out) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { out[i] = alpha * (in1[i] + in2[i]) + beta * out[i]; break; } case ElementBinary::OP_SUB: { out[i] = alpha * (in1[i] - in2[i]) + beta * out[i]; break; } case ElementBinary::OP_MUL: { out[i] = alpha * in1[i] * in2[i] + beta * out[i]; break; } case ElementBinary::OP_DIV: { out[i] = alpha * (in1[i] / in2[i]) + beta * out[i]; break; } default: assert(false); } } } /* regions[0](I): in1 regions[1](I): in2 regions[2](O): output */ __host__ void ElementBinary::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; float beta = 0.0f; assert(regions.size() == 3); assert(task->regions.size() == 3); const ElementBinary* ele = (const ElementBinary*) task->args; Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out_domain = 
runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); assert(in1_domain == in2_domain); assert(out_domain == in1_domain); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); hipLaunchKernelGGL(( elewise_binary_forward_kernel), dim3(GET_BLOCKS(out_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr); } void ElementBinary::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_backward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* out_grad, const float* in1, const float* in2, float* in1_grad, float* in2_grad) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_SUB: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_MUL: { in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_DIV: { in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i]; break; } default: assert(false); } } } /* regions[0](I): out_grad regions[1](I): in0 regions[2](I): in1 regions[3](I/O): in0_grad regions[4](I/O): in1_grad */ void ElementBinary::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; const ElementBinary* ele = (const ElementBinary*) task->args; assert(regions.size() == 5); assert(task->regions.size() == 5); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in0_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Domain in0_grad_domain = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Domain in1_grad_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); assert(out_grad_domain == in0_domain); assert(out_grad_domain == in1_domain); assert(out_grad_domain == 
in0_grad_domain); assert(out_grad_domain == in1_grad_domain); const float* out_grad_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); float* in1_grad_ptr = helperGetTensorPointerRW<float>( regions[3], task->regions[3], FID_DATA, ctx, runtime); float* in2_grad_ptr = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); hipLaunchKernelGGL(( elewise_binary_backward_kernel), dim3(GET_BLOCKS(out_grad_domain.get_volume())), dim3(CUDA_NUM_THREADS), 0, 0, out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr, in1_grad_ptr, in2_grad_ptr); } void ElementBinary::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): input0 launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2](I): input1 launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): input0_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I/O): input1_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region_grad)); launcher.add_field(4, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool ElementBinary::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
a1008a3a3b588440c0a0085a27e347dd97958a42.cu
/* Copyright 2020 Stanford * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::add(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_ADD, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::add() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_ADD); layers.push_back(ele); return ele; } Tensor FFModel::subtract(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_SUB, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::subtract() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_SUB); layers.push_back(ele); return ele; } Tensor FFModel::multiply(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_MUL, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::multiply() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_MUL); layers.push_back(ele); return ele; } Tensor FFModel::divide(const Tensor& in1, const Tensor& in2) { ElementBinary *ele = new ElementBinary(*this, ElementBinary::OP_DIV, in1, in2); layers.push_back(ele); return ele->outputs[0]; } ElementBinary* FFModel::divide() { ElementBinary* ele = new ElementBinary(*this, ElementBinary::OP_DIV); layers.push_back(ele); return ele; } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type, const Tensor& in1, const Tensor& in2) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), in1, in2), op_type(_op_type) { //TODO: implement broadcast op assert(in1.numDim == in2.numDim); int dim = in1.numDim; outputs[0].numDim = in1.numDim; for (int i = 0; i < dim; i++) { assert(in1.adim[i] == in2.adim[i]); outputs[0].adim[i] = in1.adim[i]; } } ElementBinary::ElementBinary(FFModel& model, ElementBinary::OpType _op_type) : Op(model, OP_ELEMENTWISE, "ElementBinary_"+std::to_string(_op_type), 2), op_type(_op_type) { } Tensor ElementBinary::init_inout(FFModel& model, const Tensor& input) { // TODO: currently disable this functional API since // FlexFlow assumes a single tensor as input assert(false); Tensor in1 = input, in2 = input; inputs[0] = in1; inputs[1] = in2; create_output_and_partition(model); return outputs[0]; } /* void ElementBinary::add_to_model(FFModel& model) { model.layers.push_back(this); } */ void ElementBinary::create_weights(FFModel& model) { // Do nothing } void ElementBinary::create_output_and_partition(FFModel& model) { //TODO: implement broadcast op assert(inputs[0].numDim == inputs[1].numDim); int dim = inputs[0].numDim; for (int i = 0; i < dim; i++) assert(inputs[0].adim[i] == inputs[1].adim[i]); switch (dim) { case 1: { task_is = model.get_or_create_task_is(1, name); create_output_and_partition_with_dim<1>(model); break; } case 2: { task_is = model.get_or_create_task_is(2, name); create_output_and_partition_with_dim<2>(model); break; } case 3: { task_is = 
model.get_or_create_task_is(3, name); create_output_and_partition_with_dim<3>(model); break; } case 4: { task_is = model.get_or_create_task_is(4, name); create_output_and_partition_with_dim<4>(model); break; } default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void ElementBinary::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, name)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = inputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, IndexSpaceT<NDIM>(task_is), DT_FLOAT); outputs[0].owner_op = this; outputs[0].owner_idx = 0; Rect<NDIM> input_rect; for (int i = 0; i < 2; i++) { input_rect = runtime->get_index_partition_color_space( ctx, inputs[i].part.get_index_partition()); if (input_rect == part_rect) { input_lps[i] = inputs[i].part; input_grad_lps[i] = inputs[i].part_grad; } else { model.create_disjoint_partition<NDIM>( inputs[i], IndexSpaceT<NDIM>(task_is), input_lps[i], input_grad_lps[i]); } } } __host__ void ElementBinary::init_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) {} void ElementBinary::init(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_forward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* in1, const float* in2, float* out) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { out[i] = alpha * (in1[i] + in2[i]) + beta * out[i]; break; } case ElementBinary::OP_SUB: { out[i] = alpha * (in1[i] - in2[i]) + beta * out[i]; break; } case ElementBinary::OP_MUL: { out[i] = alpha * in1[i] * in2[i] + beta * out[i]; break; } case ElementBinary::OP_DIV: { out[i] = alpha * (in1[i] / in2[i]) + beta * out[i]; break; } default: assert(false); } } } /* regions[0](I): in1 regions[1](I): in2 regions[2](O): output */ __host__ void ElementBinary::forward_task(const Task* task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; float beta = 0.0f; assert(regions.size() == 3); assert(task->regions.size() == 3); const ElementBinary* ele = (const ElementBinary*) task->args; Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in2_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); 
assert(in1_domain == in2_domain); assert(out_domain == in1_domain); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); float* out_ptr = helperGetTensorPointerWO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); elewise_binary_forward_kernel<<<GET_BLOCKS(out_domain.get_volume()), CUDA_NUM_THREADS>>>( out_domain.get_volume(), alpha, beta, ele->op_type, in1_ptr, in2_ptr, out_ptr); } void ElementBinary::forward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_FWD_TASK_ID, task_is, TaskArgument(this, sizeof(ElementBinary)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(2, FID_DATA); runtime->execute_index_space(ctx, launcher); } __global__ void elewise_binary_backward_kernel(coord_t volume, const float alpha, const float beta, ElementBinary::OpType type, const float* out_grad, const float* in1, const float* in2, float* in1_grad, float* in2_grad) { CUDA_KERNEL_LOOP(i, volume) { switch (type) { case ElementBinary::OP_ADD: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_SUB: { in1_grad[i] = alpha * out_grad[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_MUL: { in1_grad[i] = alpha * out_grad[i] * in2[i] + beta * in1_grad[i]; in2_grad[i] = alpha * out_grad[i] * in1[i] + beta * in2_grad[i]; break; } case ElementBinary::OP_DIV: { in1_grad[i] = alpha * out_grad[i] / in2[i] + beta * in1_grad[i]; in2_grad[i] = - alpha * out_grad[i] * in1[i] / (in2[i] * in2[i]) + beta * in2_grad[i]; break; } default: assert(false); } } } /* regions[0](I): out_grad regions[1](I): in0 regions[2](I): in1 regions[3](I/O): in0_grad regions[4](I/O): in1_grad */ void ElementBinary::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { float alpha = 1.0f; const ElementBinary* ele = (const ElementBinary*) task->args; assert(regions.size() == 5); assert(task->regions.size() == 5); Domain out_grad_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); Domain in0_domain = runtime->get_index_space_domain( ctx, task->regions[1].region.get_index_space()); Domain in1_domain = runtime->get_index_space_domain( ctx, task->regions[2].region.get_index_space()); Domain in0_grad_domain = runtime->get_index_space_domain( ctx, task->regions[3].region.get_index_space()); Domain in1_grad_domain = runtime->get_index_space_domain( ctx, task->regions[4].region.get_index_space()); assert(out_grad_domain == in0_domain); assert(out_grad_domain == in1_domain); assert(out_grad_domain == in0_grad_domain); assert(out_grad_domain == in1_grad_domain); const float* out_grad_ptr = helperGetTensorPointerRO<float>( 
regions[0], task->regions[0], FID_DATA, ctx, runtime); const float* in1_ptr = helperGetTensorPointerRO<float>( regions[1], task->regions[1], FID_DATA, ctx, runtime); const float* in2_ptr = helperGetTensorPointerRO<float>( regions[2], task->regions[2], FID_DATA, ctx, runtime); float* in1_grad_ptr = helperGetTensorPointerRW<float>( regions[3], task->regions[3], FID_DATA, ctx, runtime); float* in2_grad_ptr = helperGetTensorPointerRW<float>( regions[4], task->regions[4], FID_DATA, ctx, runtime); elewise_binary_backward_kernel<<<GET_BLOCKS(out_grad_domain.get_volume()), CUDA_NUM_THREADS>>>( out_grad_domain.get_volume(), alpha, alpha, ele->op_type, out_grad_ptr, in1_ptr, in2_ptr, in1_grad_ptr, in2_grad_ptr); } void ElementBinary::backward(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; IndexLauncher launcher(ELEMENTBINARY_BWD_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); // regions[0](I): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(0, FID_DATA); // regions[1](I): input0 launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(1, FID_DATA); // regions[2](I): input1 launcher.add_region_requirement( RegionRequirement(input_lps[1], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[1].region)); launcher.add_field(2, FID_DATA); // regions[3](I/O): input0_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(3, FID_DATA); // regions[4](I/O): input1_grad launcher.add_region_requirement( RegionRequirement(input_grad_lps[1], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[1].region_grad)); launcher.add_field(4, FID_DATA); runtime->execute_index_space(ctx, launcher); } bool ElementBinary::measure_compute_time(Simulator* sim, const ParallelConfig& pc, float& forward_time, float& backward_time) { //TODO: implement measure_forward return false; }
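The element-wise kernels above rely on FlexFlow's CUDA_KERNEL_LOOP and GET_BLOCKS helpers, whose definitions live in cuda_helper.h and are not shown here. The sketch below is a minimal, self-contained CUDA program (illustrative kernel name, OP_ADD case only, block count computed inline rather than via GET_BLOCKS) that expands the same pattern into an explicit grid-stride loop so the launch math is visible; it is an assumption-level example, not FlexFlow code.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void elementwiseAdd(int n, float alpha, float beta,
                               const float *in1, const float *in2, float *out) {
    // grid-stride loop: each thread handles indices i, i + blockDim*gridDim, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        out[i] = alpha * (in1[i] + in2[i]) + beta * out[i];
    }
}

int main() {
    const int n = 1 << 20;
    float *a, *b, *c;
    cudaMallocManaged(&a, n * sizeof(float));
    cudaMallocManaged(&b, n * sizeof(float));
    cudaMallocManaged(&c, n * sizeof(float));
    for (int i = 0; i < n; i++) { a[i] = 1.0f; b[i] = 2.0f; c[i] = 0.0f; }

    const int threads = 256;
    const int blocks = (n + threads - 1) / threads;   // roughly what GET_BLOCKS computes
    elementwiseAdd<<<blocks, threads>>>(n, 1.0f, 0.0f, a, b, c);
    cudaDeviceSynchronize();

    printf("c[0] = %f (expect 3.0)\n", c[0]);
    cudaFree(a); cudaFree(b); cudaFree(c);
    return 0;
}

The grid-stride form decouples the launched grid size from the tensor volume, which is why the kernel stays correct even if the block count is capped.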
fa94f68a8c04453b49655ed047566c8df81ec186.hip
// !!! This is a file automatically generated by hipify!!! // includes, system #include <iostream> #include <fstream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <hip/hip_runtime.h> // includes, project #include <hip/device_functions.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //define parameter #define pi 3.1415 #define MAXCUDADEVICES 1 #define threadNum 246 __device__ int numZ_Dev, numX_Dev, datalength_Dev, transNum_Dev, pickLength_Dev, lgLength_Dev, inhibitor_Dev; __device__ float pitch_Dev, pSize_Dev, sampFreq_Dev, acoustVel_Dev, angleAperture_Dev, delayCoef_Dev; int numZ_Host, numX_Host, datalength_Host, transNum_Host, pickLength_Host, lgLength_Host, inhibitor_Host; float pitch_Host, pSize_Host, sampFreq_Host, acoustVel_Host, angleAperture_Host, delayCoef_Host; float *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag; int *krev; float *w_real, *w_imag; // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraIntDev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq // paraFloatDev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host) { printf_s("Host parameter setting...\n"); // imgSize_Host numZ_Host = MAXZ_host; numX_Host = MAXX_host; // paraInt_Host transNum_Host = *paraInt; inhibitor_Host = *(paraInt + 1); lgLength_Host = *(paraInt + 2); datalength_Host = *(paraInt + 3); pickLength_Host = 1; pickLength_Host <<= lgLength_Host; // paraFloat_Host delayCoef_Host = *paraFloat; acoustVel_Host = *(paraFloat + 1); pitch_Host = *(paraFloat + 2); pSize_Host = *(paraFloat + 3); sampFreq_Host = *(paraFloat + 4); angleAperture_Host = 0.5; // imgSize_Dev checkCudaErrors(hipSetDevice(0)); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); checkCudaErrors(hipMemcpyToSymbol(numZ_Dev, &numZ_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(numX_Dev, &numX_Host, sizeof(int))); // paraInt_Dev checkCudaErrors(hipMemcpyToSymbol(transNum_Dev, &transNum_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(inhibitor_Dev, &inhibitor_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(lgLength_Dev, &lgLength_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(datalength_Dev, &datalength_Host, sizeof(int))); checkCudaErrors(hipMemcpyToSymbol(pickLength_Dev, &pickLength_Host, sizeof(int))); // paraFloat_Dev checkCudaErrors(hipMemcpyToSymbol(delayCoef_Dev, &delayCoef_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(acoustVel_Dev, &acoustVel_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(pitch_Dev, &pitch_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(pSize_Dev, &pSize_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(sampFreq_Dev, &sampFreq_Host, sizeof(float))); checkCudaErrors(hipMemcpyToSymbol(angleAperture_Dev, &angleAperture_Host, sizeof(float))); sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); printf_s("Device parameter setting...\n"); // float*_Dev malloc checkCudaErrors(hipMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(dataPick), 
pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(y_real), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float))); checkCudaErrors(hipMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float))); // int*_Dev malloc checkCudaErrors(hipMalloc((void**)&(krev), pickLength_Host*sizeof(int))); //calculate parameter of fft int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int)); for (int k = 0; k < pickLength_Host; ++k) { int r = k; *(krev_Host + k) = (r & 0x1); for (int j = 1; j < lgLength_Host; ++j) { *(krev_Host + k) = (*(krev_Host + k)) << 1; r = r >> 1; if (r & 0x1) ++(*(krev_Host + k)); } } checkCudaErrors(hipMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), hipMemcpyHostToDevice)); free(krev_Host); float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)), *wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)); int m = 1; float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host; for (int s = 1; s <= lgLength_Host; ++s) { m *= 2; wm_real = cos(2 * pi * 1 / m); wm_imag = -sin(2 * pi * 1 / m); w_realRec = 1; w_imagRec = 0; for (int j = 0; j < (m / 2); ++j) { //w = w * wm = t * wm; *(wreal_now + j) = w_realRec; *(wimag_now + j) = w_imagRec; w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag; w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real; } wreal_now += m / 2; wimag_now += m / 2; } checkCudaErrors(hipMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), hipMemcpyHostToDevice)); free(wreal_Host); free(wimag_Host); // copy host data to device checkCudaErrors(hipMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), hipMemcpyHostToDevice)); } extern "C" void clearcudas() { checkCudaErrors(hipFree(data_Dev)); checkCudaErrors(hipFree(imgRecons_Dev)); checkCudaErrors(hipFree(dataPick)); checkCudaErrors(hipFree(y_real)); checkCudaErrors(hipFree(y_imag)); checkCudaErrors(hipFree(w_real)); checkCudaErrors(hipFree(w_imag)); checkCudaErrors(hipFree(krev)); } __device__ void getEvelope(int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag) { // 2_DFT float *px = x; for (int k = 0; k < pickLength_Dev; ++k) { *(y_real + *(krev + k)) = *px; *(y_imag + *(krev + k)) = 0; ++px; } int m = 1; float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } // HilbertTran int count = 0; for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even { (*(y_real + count)) *= 2; (*(y_imag + count)) *= 2; } for (count += 1; count < pickLength_Dev; ++count) { (*(y_real + count)) *= 0; (*(y_imag + 
count)) *= 0; } for (int k = 0; k < pickLength_Dev; ++k) { count = *(krev + k); if (count == k) { *(y_imag + k) = -(*(y_imag + k)); } else if (k < count) { t_real = *(y_real + k); t_imag = *(y_imag + k); *(y_real + k) = *(y_real + count); *(y_imag + k) = -(*(y_imag + count)); *(y_real + count) = t_real; *(y_imag + count) = -t_imag; } } m = 1; wreal_now = w_real; wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } int div_len = pickLength_Dev*pickLength_Dev; for (int i = 0; i < pickLength_Dev; ++i) { *(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i)); *(x + i) /= div_len; } } __global__ void PArecon(float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart) { // access thread id const unsigned int tidx = threadIdx.x; // access block id const unsigned int bidx = blockIdx.x; if (bidx < zstart) { return; } float Distan; float Y, Z, y; int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx); float *pickBeg = dataPick + pointer; int pick_offset = pickLength_Dev / 2; Z = bidx * pSize_Dev; Y = tidx * pSize_Dev; int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_start < 0) { y_start = 0; } int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_end > transNum_Dev - 1) { y_end = transNum_Dev - 1; } for (int len = 0; len < pickLength_Dev; ++len) { *(pickBeg + len) = 0; } int lenMax; for (int bidy = y_start; bidy <= y_end; ++bidy) { y = (bidy + 0.5) * pitch_Dev; Distan = sqrt((Y - y)*(Y - y) + Z*Z); POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset; lenMax = pickLength_Dev; if (POINTER + lenMax >= datalength_Dev){ lenMax = datalength_Dev - 1 - POINTER; } if (POINTER >= 0 && POINTER < datalength_Dev) { POINTER = POINTER + bidy*datalength_Dev; for (int len = 0; len < lenMax; ++len) { *(pickBeg + len) += *(data_Dev + POINTER + len); } } } getEvelope(krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer); lenMax = 0; for (int len = 1; len < pickLength_Dev - 1; ++len) { if (*(pickBeg + len) > *(pickBeg + lenMax)) { lenMax = len; } } if (*(pickBeg + lenMax) > 0) { *(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset); for (int i = 1; i < inhibitor_Dev; ++i) { *(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset); *(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax); } } __syncthreads(); } __host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s checkCudaErrors(hipSetDevice(cudadeviceindex)); // setup execution parameters dim3 grids(numZ_Host, 1, 1); dim3 threads(numX_Host, 1, 1); // execute the kernel PArecon << < grids, threads >> >(data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart); // check if kernel execution 
generated and error getLastCudaError("Kernel execution failed"); // copy result from device to host checkCudaErrors(hipMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), hipMemcpyDeviceToHost)); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); } void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host) { int devID = 0; printf_s("Initializing...\n"); initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host); printf_s("Reconstructing...\n"); parecon(devID, MAXZ_host, 0, imgRecons); printf_s("Clearing...\n"); clearcudas(); } int main() { using namespace std; int *paraInt = new int[4]; paraInt[0] = 128; paraInt[1] = 3; paraInt[2] = 4; paraInt[3] = 1024; float *paraFloat = new float[5]; paraFloat[0] = 0; paraFloat[1] = 1.54; paraFloat[2] = 0.3; paraFloat[3] = 0.1; paraFloat[4] = 40; int MAXX_host = 384, MAXZ_host = (int)(paraInt[3] * paraFloat[1] / paraFloat[4] / paraFloat[3]); ifstream fin("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\signal\\Rf_032918_113516_OBP_PA_64_15331342.txt"); float *data = (float *)malloc(paraInt[0] * paraInt[3] * sizeof(float)); for (int i = 0; i < paraInt[0] * paraInt[3]; ++i) { fin >> *(data + i); } fin.close(); // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq printf_s("PA reconstructing...\n"); float *imgRecons = (float *)malloc(MAXX_host*MAXZ_host*sizeof(float)); MultiDASEnv(paraInt, paraFloat, data, imgRecons, MAXZ_host, MAXX_host); ofstream fout("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\recons\\fig_recons.txt"); for (int i = 0; i < MAXZ_host*MAXX_host; ++i) { fout << *(imgRecons + i); fout << " "; } fout.close(); free(data); }
fa94f68a8c04453b49655ed047566c8df81ec186.cu
// includes, system #include <iostream> #include <fstream> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes CUDA #include <cuda_runtime.h> // includes, project #include <device_functions.h> #include <device_launch_parameters.h> #include <helper_cuda.h> #include <helper_functions.h> // helper functions for SDK examples //define parameter #define pi 3.1415 #define MAXCUDADEVICES 1 #define threadNum 246 __device__ int numZ_Dev, numX_Dev, datalength_Dev, transNum_Dev, pickLength_Dev, lgLength_Dev, inhibitor_Dev; __device__ float pitch_Dev, pSize_Dev, sampFreq_Dev, acoustVel_Dev, angleAperture_Dev, delayCoef_Dev; int numZ_Host, numX_Host, datalength_Host, transNum_Host, pickLength_Host, lgLength_Host, inhibitor_Host; float pitch_Host, pSize_Host, sampFreq_Host, acoustVel_Host, angleAperture_Host, delayCoef_Host; float *data_Dev, *imgRecons_Dev, *dataPick, *y_real, *y_imag; int *krev; float *w_real, *w_imag; // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraIntDev : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq // paraFloatDev : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq extern "C" void initcudas(int *paraInt, float *paraFloat, float *data, int MAXZ_host, int MAXX_host) { printf_s("Host parameter setting...\n"); // imgSize_Host numZ_Host = MAXZ_host; numX_Host = MAXX_host; // paraInt_Host transNum_Host = *paraInt; inhibitor_Host = *(paraInt + 1); lgLength_Host = *(paraInt + 2); datalength_Host = *(paraInt + 3); pickLength_Host = 1; pickLength_Host <<= lgLength_Host; // paraFloat_Host delayCoef_Host = *paraFloat; acoustVel_Host = *(paraFloat + 1); pitch_Host = *(paraFloat + 2); pSize_Host = *(paraFloat + 3); sampFreq_Host = *(paraFloat + 4); angleAperture_Host = 0.5; // imgSize_Dev checkCudaErrors(cudaSetDevice(0)); StopWatchInterface *timer = 0; sdkCreateTimer(&timer); sdkStartTimer(&timer); checkCudaErrors(cudaMemcpyToSymbol(numZ_Dev, &numZ_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(numX_Dev, &numX_Host, sizeof(int))); // paraInt_Dev checkCudaErrors(cudaMemcpyToSymbol(transNum_Dev, &transNum_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(inhibitor_Dev, &inhibitor_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(lgLength_Dev, &lgLength_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(datalength_Dev, &datalength_Host, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(pickLength_Dev, &pickLength_Host, sizeof(int))); // paraFloat_Dev checkCudaErrors(cudaMemcpyToSymbol(delayCoef_Dev, &delayCoef_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(acoustVel_Dev, &acoustVel_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(pitch_Dev, &pitch_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(pSize_Dev, &pSize_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(sampFreq_Dev, &sampFreq_Host, sizeof(float))); checkCudaErrors(cudaMemcpyToSymbol(angleAperture_Dev, &angleAperture_Host, sizeof(float))); sdkStopTimer(&timer); printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer)); sdkDeleteTimer(&timer); printf_s("Device parameter setting...\n"); // float*_Dev malloc checkCudaErrors(cudaMalloc((void**)&(data_Dev), transNum_Host*datalength_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(imgRecons_Dev), numZ_Host*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(dataPick), pickLength_Host*threadNum*numX_Host*sizeof(float))); 
checkCudaErrors(cudaMalloc((void**)&(y_real), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(y_imag), pickLength_Host*threadNum*numX_Host*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(w_real), (pickLength_Host - 1)*sizeof(float))); checkCudaErrors(cudaMalloc((void**)&(w_imag), (pickLength_Host - 1)*sizeof(float))); // int*_Dev malloc checkCudaErrors(cudaMalloc((void**)&(krev), pickLength_Host*sizeof(int))); //calculate parameter of fft int *krev_Host = (int *)malloc(pickLength_Host*sizeof(int)); for (int k = 0; k < pickLength_Host; ++k) { int r = k; *(krev_Host + k) = (r & 0x1); for (int j = 1; j < lgLength_Host; ++j) { *(krev_Host + k) = (*(krev_Host + k)) << 1; r = r >> 1; if (r & 0x1) ++(*(krev_Host + k)); } } checkCudaErrors(cudaMemcpy(krev, krev_Host, pickLength_Host*sizeof(int), cudaMemcpyHostToDevice)); free(krev_Host); float *wreal_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)), *wimag_Host = (float *)malloc((pickLength_Host - 1)*sizeof(float)); int m = 1; float wm_real, wm_imag, w_realRec, w_imagRec, *wreal_now = wreal_Host, *wimag_now = wimag_Host; for (int s = 1; s <= lgLength_Host; ++s) { m *= 2; wm_real = cos(2 * pi * 1 / m); wm_imag = -sin(2 * pi * 1 / m); w_realRec = 1; w_imagRec = 0; for (int j = 0; j < (m / 2); ++j) { //w = w * wm = t * wm; *(wreal_now + j) = w_realRec; *(wimag_now + j) = w_imagRec; w_realRec = *(wreal_now + j)*wm_real - *(wimag_now + j)*wm_imag; w_imagRec = *(wreal_now + j)*wm_imag + *(wimag_now + j)*wm_real; } wreal_now += m / 2; wimag_now += m / 2; } checkCudaErrors(cudaMemcpy(w_real, wreal_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(w_imag, wimag_Host, (pickLength_Host - 1)*sizeof(float), cudaMemcpyHostToDevice)); free(wreal_Host); free(wimag_Host); // copy host data to device checkCudaErrors(cudaMemcpy(data_Dev, data, transNum_Host*datalength_Host*sizeof(float), cudaMemcpyHostToDevice)); } extern "C" void clearcudas() { checkCudaErrors(cudaFree(data_Dev)); checkCudaErrors(cudaFree(imgRecons_Dev)); checkCudaErrors(cudaFree(dataPick)); checkCudaErrors(cudaFree(y_real)); checkCudaErrors(cudaFree(y_imag)); checkCudaErrors(cudaFree(w_real)); checkCudaErrors(cudaFree(w_imag)); checkCudaErrors(cudaFree(krev)); } __device__ void getEvelope(int *krev, float *w_real, float *w_imag, float *x, float *y_real, float *y_imag) { // 2_DFT float *px = x; for (int k = 0; k < pickLength_Dev; ++k) { *(y_real + *(krev + k)) = *px; *(y_imag + *(krev + k)) = 0; ++px; } int m = 1; float t_real, t_imag, u_real, u_imag, *wreal_now = w_real, *wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } // HilbertTran int count = 0; for (count = 1; count < pickLength_Dev / 2; ++count) //pickLength must be even { (*(y_real + count)) *= 2; (*(y_imag + count)) *= 2; } for (count += 1; count < pickLength_Dev; ++count) { (*(y_real + count)) *= 0; (*(y_imag + count)) *= 0; } for (int k = 0; k < 
pickLength_Dev; ++k) { count = *(krev + k); if (count == k) { *(y_imag + k) = -(*(y_imag + k)); } else if (k < count) { t_real = *(y_real + k); t_imag = *(y_imag + k); *(y_real + k) = *(y_real + count); *(y_imag + k) = -(*(y_imag + count)); *(y_real + count) = t_real; *(y_imag + count) = -t_imag; } } m = 1; wreal_now = w_real; wimag_now = w_imag; for (int s = 1; s <= lgLength_Dev; ++s) { m *= 2; for (int k = 0; k < pickLength_Dev; k += m) { for (int j = 0; j < (m / 2); ++j) { //t = w * (*(y+k+j+m/2)) t_real = *(wreal_now + j)*(*(y_real + k + j + m / 2)) - *(wimag_now + j)*(*(y_imag + k + j + m / 2)); t_imag = *(wreal_now + j)*(*(y_imag + k + j + m / 2)) + *(wimag_now + j)*(*(y_real + k + j + m / 2)); u_real = *(y_real + k + j); u_imag = *(y_imag + k + j); *(y_real + k + j) = u_real + t_real; *(y_imag + k + j) = u_imag + t_imag; *(y_real + k + j + m / 2) = u_real - t_real; *(y_imag + k + j + m / 2) = u_imag - t_imag; } } wreal_now += m / 2; wimag_now += m / 2; } int div_len = pickLength_Dev*pickLength_Dev; for (int i = 0; i < pickLength_Dev; ++i) { *(x + i) = (*(y_real + i))*(*(y_real + i)) + (*(y_imag + i))*(*(y_imag + i)); *(x + i) /= div_len; } } __global__ void PArecon(float *data_Dev, float *imgRecons_Dev, float *dataPick, int *krev, float *w_real, float *w_imag, float *y_real, float *y_imag, int zdepth, int zstart) { // access thread id const unsigned int tidx = threadIdx.x; // access block id const unsigned int bidx = blockIdx.x; if (bidx < zstart) { return; } float Distan; float Y, Z, y; int POINTER, pointer = pickLength_Dev*((bidx % threadNum)*numX_Dev + tidx); float *pickBeg = dataPick + pointer; int pick_offset = pickLength_Dev / 2; Z = bidx * pSize_Dev; Y = tidx * pSize_Dev; int y_start = (int)((Y - Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_start < 0) { y_start = 0; } int y_end = (int)((Y + Z*angleAperture_Dev) / pitch_Dev - 0.5); if (y_end > transNum_Dev - 1) { y_end = transNum_Dev - 1; } for (int len = 0; len < pickLength_Dev; ++len) { *(pickBeg + len) = 0; } int lenMax; for (int bidy = y_start; bidy <= y_end; ++bidy) { y = (bidy + 0.5) * pitch_Dev; Distan = sqrt((Y - y)*(Y - y) + Z*Z); POINTER = (int)((Distan / acoustVel_Dev - delayCoef_Dev)*sampFreq_Dev + 0.5) - pick_offset; lenMax = pickLength_Dev; if (POINTER + lenMax >= datalength_Dev){ lenMax = datalength_Dev - 1 - POINTER; } if (POINTER >= 0 && POINTER < datalength_Dev) { POINTER = POINTER + bidy*datalength_Dev; for (int len = 0; len < lenMax; ++len) { *(pickBeg + len) += *(data_Dev + POINTER + len); } } } getEvelope(krev, w_real, w_imag, pickBeg, y_real + pointer, y_imag + pointer); lenMax = 0; for (int len = 1; len < pickLength_Dev - 1; ++len) { if (*(pickBeg + len) > *(pickBeg + lenMax)) { lenMax = len; } } if (*(pickBeg + lenMax) > 0) { *(imgRecons_Dev + tidx*zdepth + bidx) = *(pickBeg + pick_offset); for (int i = 1; i < inhibitor_Dev; ++i) { *(imgRecons_Dev + tidx*zdepth + bidx) *= *(pickBeg + pick_offset); *(imgRecons_Dev + tidx*zdepth + bidx) /= *(pickBeg + lenMax); } } __syncthreads(); } __host__ void parecon(int cudadeviceindex, int zdepth, int zstart, float *imgRecons) { // use command-line specified CUDA device, otherwise use device with highest Gflops/s checkCudaErrors(cudaSetDevice(cudadeviceindex)); // setup execution parameters dim3 grids(numZ_Host, 1, 1); dim3 threads(numX_Host, 1, 1); // execute the kernel PArecon << < grids, threads >> >(data_Dev, imgRecons_Dev, dataPick, krev, w_real, w_imag, y_real, y_imag, zdepth, zstart); // check if kernel execution generated and error 
getLastCudaError("Kernel execution failed"); // copy result from device to host checkCudaErrors(cudaMemcpy(imgRecons, imgRecons_Dev, numX_Host*zdepth*sizeof(float), cudaMemcpyDeviceToHost)); // check if kernel execution generated and error getLastCudaError("Kernel execution failed"); } void MultiDASEnv(int *paraInt, float *paraFloat, float *data, float *imgRecons, int MAXZ_host, int MAXX_host) { int devID = 0; printf_s("Initializing...\n"); initcudas(paraInt, paraFloat, data, MAXZ_host, MAXX_host); printf_s("Reconstructing...\n"); parecon(devID, MAXZ_host, 0, imgRecons); printf_s("Clearing...\n"); clearcudas(); } int main() { using namespace std; int *paraInt = new int[4]; paraInt[0] = 128; paraInt[1] = 3; paraInt[2] = 4; paraInt[3] = 1024; float *paraFloat = new float[5]; paraFloat[0] = 0; paraFloat[1] = 1.54; paraFloat[2] = 0.3; paraFloat[3] = 0.1; paraFloat[4] = 40; int MAXX_host = 384, MAXZ_host = (int)(paraInt[3] * paraFloat[1] / paraFloat[4] / paraFloat[3]); ifstream fin("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\signal\\Rf_032918_113516_OBP_PA_64_15331342.txt"); float *data = (float *)malloc(paraInt[0] * paraInt[3] * sizeof(float)); for (int i = 0; i < paraInt[0] * paraInt[3]; ++i) { fin >> *(data + i); } fin.close(); // paraInt : 0-transElement, 1-inhibitor, 2-lgLength, 3-dataLength // paraFloat : 0-delayCoef, 1-acoustVel, 2-pitch, 3-pixelSize, 4-samFreq printf_s("PA reconstructing...\n"); float *imgRecons = (float *)malloc(MAXX_host*MAXZ_host*sizeof(float)); MultiDASEnv(paraInt, paraFloat, data, imgRecons, MAXZ_host, MAXX_host); ofstream fout("C:\\Users\\MX\\Documents\\research\\PA Reconstructions\\GPUProg\\template\\recons\\fig_recons.txt"); for (int i = 0; i < MAXZ_host*MAXX_host; ++i) { fout << *(imgRecons + i); fout << " "; } fout.close(); free(data); }
57c17b616a1bd9944834a081f8d12c38c6ae93cf.hip
// !!! This is a file automatically generated by hipify!!! /* Parallel Algorithms Homework 3 Team Members: Dhruv Gupta and Sravya Kambhapathi */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "opencv2/opencv.hpp" #include "opencv2/core/core.hpp" #include "opencv2/core/cuda.hpp" #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" #include <math.h> #include <stdlib.h> //#include "opencv2/cudawarping.hpp" #include <stdio.h> #include <helper_cuda.h> #ifndef __HIPCC__ #define __HIPCC__ #endif #include <hip/device_functions.h> using namespace cv; using namespace std; hipError_t blurWithCuda(uchar* in, uchar* out, int w, int h, int SCALE); //void blurKernel(uchar * in, uchar * out, int w, int h); int SCALE = 2; __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } /* __global__ void blurKernel(uchar* in, uchar* out, int w, int h) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < w && Row < h) { int pixVal = 0; int pixels = 0; for (int blurRow = -SCALE; blurRow < SCALE + 1; ++blurRow) { for (int blurCol = -SCALE; blurCol < SCALE + 1; ++blurCol) { int curRow = Row + blurRow; int curCol = Col + blurCol; if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) { pixVal += in[curRow * w + curCol]; pixels++; } } } out[Row * w + Col] = (unsigned char)(pixVal / pixels); } } */ __global__ void blurKernel(uchar* in, uchar* out, int w, int h, int SCALE) { int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; if (i < h*SCALE && j < w* SCALE) { int iIn = (i / SCALE); int jIn = (j / SCALE); out[i * w*SCALE + j] = in[iIn * w + jIn]; } } int main(int argc, char* argv[]) { //Mat image = imread("lena512.bmp", IMREAD_GRAYSCALE); // Read the file const char* name; if (argc < 2) { printf("usage: nearestNeighbor2.exe <filename> <scale>"); //strcpy(name, "lena512.bmp"); name = "lena512.bmp"; }else { char* pEnd; //strcpy(name,argv[2]); name = argv[1]; SCALE = (int)strtol(argv[2], &pEnd, 10); } Mat image; image = imread(name, IMREAD_GRAYSCALE); namedWindow("Display window", WINDOW_AUTOSIZE); imshow("Display window", image); //waitKey(0); //std::cout << image.channels(); // import image int rows = image.rows; int cols = image.cols; uchar* in = image.data; uchar * out = (uchar *) malloc(rows * cols * SCALE * SCALE+1); // Add vectors in parallel. hipError_t cudaStatus = blurWithCuda(in, out, cols, rows, SCALE); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } Mat out_mat = Mat(rows* SCALE, cols* SCALE, CV_8UC1, out); namedWindow("Display window2", WINDOW_AUTOSIZE); imshow("Display window2", out_mat); waitKey(0); /* printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); */ // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. 
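// Note: despite the template comment above, blurWithCuda (defined below) performs
// nearest-neighbor upscaling of the grayscale image by SCALE on the GPU, not vector addition.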
hipError_t blurWithCuda(uchar* in, uchar* out, int w, int h, int SCALE) { uchar * dPin; uchar * dPout; hipError_t cudaStatus; //dim3 dimGrid(ceil(h / 16.0), ceil(w / 16.0), 1); //dim3 dimBlock(16, 16, 1); dim3 dimBlock(16, 16); dim3 dimGrid( ((w* SCALE)/16)+1 , ((h * SCALE)/16) +1); // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dPin, w * h * sizeof(uchar)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dPout, SCALE * SCALE *w * h * sizeof(uchar)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dPin, in, w * h * sizeof(uchar), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dPout, out, w * h * sizeof(uchar), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. //addKernel<<<1, size>>>(dev_c, dev_a, dev_b); hipLaunchKernelGGL(( blurKernel) , dim3(dimGrid), dim3(dimBlock) , 0, 0, dPin, dPout, w, h, SCALE); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(out, dPout, w * h * SCALE * SCALE* sizeof(uchar), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dPin); hipFree(dPout); return cudaStatus; }
57c17b616a1bd9944834a081f8d12c38c6ae93cf.cu
/* Parallel Algorithms Homework 3 Team Members: Dhruv Gupta and Sravya Kambhapathi */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "opencv2/opencv.hpp" #include "opencv2/core/core.hpp" #include "opencv2/core/cuda.hpp" #include <opencv2/highgui/highgui.hpp> #include "opencv2/imgproc/imgproc.hpp" #include <math.h> #include <stdlib.h> //#include "opencv2/cudawarping.hpp" #include <stdio.h> #include <helper_cuda.h> #ifndef __CUDACC__ #define __CUDACC__ #endif #include <device_functions.h> using namespace cv; using namespace std; cudaError_t blurWithCuda(uchar* in, uchar* out, int w, int h, int SCALE); //void blurKernel(uchar * in, uchar * out, int w, int h); int SCALE = 2; __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } /* __global__ void blurKernel(uchar* in, uchar* out, int w, int h) { int Col = blockIdx.x * blockDim.x + threadIdx.x; int Row = blockIdx.y * blockDim.y + threadIdx.y; if (Col < w && Row < h) { int pixVal = 0; int pixels = 0; for (int blurRow = -SCALE; blurRow < SCALE + 1; ++blurRow) { for (int blurCol = -SCALE; blurCol < SCALE + 1; ++blurCol) { int curRow = Row + blurRow; int curCol = Col + blurCol; if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) { pixVal += in[curRow * w + curCol]; pixels++; } } } out[Row * w + Col] = (unsigned char)(pixVal / pixels); } } */ __global__ void blurKernel(uchar* in, uchar* out, int w, int h, int SCALE) { int i = blockDim.y * blockIdx.y + threadIdx.y; int j = blockDim.x * blockIdx.x + threadIdx.x; if (i < h*SCALE && j < w* SCALE) { int iIn = (i / SCALE); int jIn = (j / SCALE); out[i * w*SCALE + j] = in[iIn * w + jIn]; } } int main(int argc, char* argv[]) { //Mat image = imread("lena512.bmp", IMREAD_GRAYSCALE); // Read the file const char* name; if (argc < 2) { printf("usage: nearestNeighbor2.exe <filename> <scale>"); //strcpy(name, "lena512.bmp"); name = "lena512.bmp"; }else { char* pEnd; //strcpy(name,argv[2]); name = argv[1]; SCALE = (int)strtol(argv[2], &pEnd, 10); } Mat image; image = imread(name, IMREAD_GRAYSCALE); namedWindow("Display window", WINDOW_AUTOSIZE); imshow("Display window", image); //waitKey(0); //std::cout << image.channels(); // import image int rows = image.rows; int cols = image.cols; uchar* in = image.data; uchar * out = (uchar *) malloc(rows * cols * SCALE * SCALE+1); // Add vectors in parallel. cudaError_t cudaStatus = blurWithCuda(in, out, cols, rows, SCALE); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } Mat out_mat = Mat(rows* SCALE, cols* SCALE, CV_8UC1, out); namedWindow("Display window2", WINDOW_AUTOSIZE); imshow("Display window2", out_mat); waitKey(0); /* printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); */ // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t blurWithCuda(uchar* in, uchar* out, int w, int h, int SCALE) { uchar * dPin; uchar * dPout; cudaError_t cudaStatus; //dim3 dimGrid(ceil(h / 16.0), ceil(w / 16.0), 1); //dim3 dimBlock(16, 16, 1); dim3 dimBlock(16, 16); dim3 dimGrid( ((w* SCALE)/16)+1 , ((h * SCALE)/16) +1); // Choose which GPU to run on, change this on a multi-GPU system. 
cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dPin, w * h * sizeof(uchar)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dPout, SCALE * SCALE *w * h * sizeof(uchar)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dPin, in, w * h * sizeof(uchar), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dPout, out, w * h * sizeof(uchar), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. //addKernel<<<1, size>>>(dev_c, dev_a, dev_b); blurKernel <<< dimGrid, dimBlock >>> (dPin, dPout, w, h, SCALE); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(out, dPout, w * h * SCALE * SCALE* sizeof(uchar), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dPin); cudaFree(dPout); return cudaStatus; }
3c1e6cc87b1888d3755c0b40886108d9b4b8da87.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void bounce(float* to, float* from, rrnode *dev_bounce, unsigned char *ncls, int revcount)
{
    int i, j, k, x, y, z;
    int n = get_global_id(0);
    if (n >= revcount) {
        //printf("Overflow n=%d",n);
        return;
    }
    i = dev_bounce[n].i;
    j = dev_bounce[n].j;
    k = dev_bounce[n].k;
    for (int l = 0; l < DIRECTIONS; ++l) {
        if (dev_bounce[n].del[l] > -1) {
            x = i - ci[l].x;
            y = j - ci[l].y;
            z = k - ci[l].z;
            if (ncls[cstore(x, y, z)] == FFLOW) {
                to[store(x, y, z, opp[l])] = from[store(x, y, z, l)];
            }
        }
    }
    //printf("I am in bounce");
    return;
}
3c1e6cc87b1888d3755c0b40886108d9b4b8da87.cu
__global__ void bounce(float* to, float* from, rrnode *dev_bounce, unsigned char *ncls, int revcount)
{
    int i, j, k, x, y, z;
    int n = get_global_id(0);
    if (n >= revcount) {
        //printf("Overflow n=%d",n);
        return;
    }
    i = dev_bounce[n].i;
    j = dev_bounce[n].j;
    k = dev_bounce[n].k;
    for (int l = 0; l < DIRECTIONS; ++l) {
        if (dev_bounce[n].del[l] > -1) {
            x = i - ci[l].x;
            y = j - ci[l].y;
            z = k - ci[l].z;
            if (ncls[cstore(x, y, z)] == FFLOW) {
                to[store(x, y, z, opp[l])] = from[store(x, y, z, l)];
            }
        }
    }
    //printf("I am in bounce");
    return;
}
36c4bd6dc63ae3b1078743768f7d02a0f395ed9d.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>

__global__ void checkIndx(void)
{
    printf("threadIdx:(%d, %d, %d) "
           "blockIdx:(%d, %d, %d) "
           "blockDim:(%d, %d, %d) "
           "gridDim:(%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

int main()
{
    int nElem = 64;

    /**
     * Launch checkIndx with block sizes of nElem / i threads for i = 1, 2 and 4
     * (64, 32 and 16 threads per block), with the grid sized to cover all nElem elements.
     */
    for (int i = 1; i < 5; ++i) {
        if (i == 1 || i % 2 == 0) {
            dim3 block(nElem / i);
            dim3 grid((nElem + block.x - 1) / block.x);
            printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
            printf("block.x %d, block.y %d, block.z %d\n", block.x, block.y, block.z);
            hipLaunchKernelGGL(( checkIndx) , dim3(grid), dim3(block), 0, 0, );
        }
    }

    hipDeviceReset();
    return 0;
}
36c4bd6dc63ae3b1078743768f7d02a0f395ed9d.cu
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void checkIndx(void)
{
    printf("threadIdx:(%d, %d, %d) "
           "blockIdx:(%d, %d, %d) "
           "blockDim:(%d, %d, %d) "
           "gridDim:(%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

int main()
{
    int nElem = 64;

    /**
     * Launch checkIndx with block sizes of nElem / i threads for i = 1, 2 and 4
     * (64, 32 and 16 threads per block), with the grid sized to cover all nElem elements.
     */
    for (int i = 1; i < 5; ++i) {
        if (i == 1 || i % 2 == 0) {
            dim3 block(nElem / i);
            dim3 grid((nElem + block.x - 1) / block.x);
            printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
            printf("block.x %d, block.y %d, block.z %d\n", block.x, block.y, block.z);
            checkIndx<<<grid, block>>>();
        }
    }

    cudaDeviceReset();
    return 0;
}
391ec203a7f58522957ef223d309570292491179.hip
// !!! This is a file automatically generated by hipify!!! /* * noise_remover_v1.cu * * This program removes noise from an image based on Speckle Reducing Anisotropic Diffusion * Y. Yu, S. Acton, Speckle reducing anisotropic diffusion, * IEEE Transactions on Image Processing 11(11)(2002) 1260-1270 <http://people.virginia.edu/~sc5nf/01097762.pdf> * Original implementation is Modified by Burak BASTEM * * This code was developed based on the Rodinia Benchmark Suite and * some changes have been made on this algorithm. * * IMPORTANT: * * The final version of this code has been developed by Mustafa SARA and Mustafa Mert GETRK * as a project of Parallel Programming (COMP 429) course. Ko University's code of ethics * can be applied to this code and liability can not be accepted for any negative situation. * Therefore, be careful when you get content from here. * * The serialized version of the noise removal algorithm was parallelized using various * methods via the CUDA library. * * The parallelization process consists of 3 steps in total and these are as follows: * - noise_remover_v1.cu (Naive implementation) * - noise_remover_v2.cu (Using Temporary Variables to Eliminate Global Memory References) * - noise_remover_v3.cu (Using Shared Memory on the GPU) * * For more detailed questions you can review our project report. * * You can also contact me at this email address: [email protected] */ #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <sys/time.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define MATCH(s) (!strcmp(argv[ac], (s))) static const double kMicro = 1.0e-6; double get_time() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { printf("ERROR: Bad call to gettimeofday\n"); return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } //COMPUTE 1 // --- 32 floating point arithmetic operations per element -> 32*(height-1)*(width-1) in total __global__ void compute1(int height, int width, long k, unsigned char *image_d, float *north_deriv_d, float *south_deriv_d, float *west_deriv_d, float *east_deriv_d, float gradient_square, float laplacian, float num, float den, float std_dev, float std_dev2, float *diff_coef_d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i < height && j < width) { k = i * width + j; // position of current element north_deriv_d[k] = image_d[(i - 1) * width + j] - image_d[k]; // north derivative --- 1 floating point arithmetic operations south_deriv_d[k] = image_d[(i + 1) * width + j] - image_d[k]; // south derivative --- 1 floating point arithmetic operations west_deriv_d[k] = image_d[i * width + (j - 1)] - image_d[k]; // west derivative --- 1 floating point arithmetic operations east_deriv_d[k] = image_d[i * width + (j + 1)] - image_d[k]; // east derivative --- 1 floating point arithmetic operations gradient_square = (north_deriv_d[k] * north_deriv_d[k] + south_deriv_d[k] * south_deriv_d[k] + west_deriv_d[k] * west_deriv_d[k] + east_deriv_d[k] * east_deriv_d[k]) / (image_d[k] * image_d[k]); // 9 floating point arithmetic operations laplacian = (north_deriv_d[k] + south_deriv_d[k] + west_deriv_d[k] + east_deriv_d[k]) / image_d[k]; // 4 floating point arithmetic operations num = (0.5 * gradient_square) - ((1.0 / 16.0) * (laplacian * laplacian)); // 5 floating point arithmetic 
operations den = 1 + (.25 * laplacian); // 2 floating point arithmetic operations std_dev2 = num / (den * den); // 2 floating point arithmetic operations den = (std_dev2 - std_dev) / (std_dev * (1 + std_dev)); // 4 floating point arithmetic operations diff_coef_d[k] = 1.0 / (1.0 + den); // 2 floating point arithmetic operations if (diff_coef_d[k] < 0) { diff_coef_d[k] = 0; } else if (diff_coef_d[k] > 1) { diff_coef_d[k] = 1; } } else { return; } } // COMPUTE 2 // divergence and image update --- 10 floating point arithmetic operations per element -> 10*(height-1)*(width-1) in total __global__ void compute2(int height, int width, long k, unsigned char *image_d, float lambda, float diff_coef_north, float diff_coef_south, float diff_coef_west, float diff_coef_east, float divergence, float *diff_coef_d, float *north_deriv_d, float *south_deriv_d, float *west_deriv_d, float *east_deriv_d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i < height && j < width) { k = i * width + j; // get position of current element diff_coef_north = diff_coef_d[k]; // north diffusion coefficient diff_coef_south = diff_coef_d[(i + 1) * width + j]; // south diffusion coefficient diff_coef_west = diff_coef_d[k]; // west diffusion coefficient diff_coef_east = diff_coef_d[i * width + (j + 1)]; // east diffusion coefficient divergence = diff_coef_north * north_deriv_d[k] + diff_coef_south * south_deriv_d[k] + diff_coef_west * west_deriv_d[k] + diff_coef_east * east_deriv_d[k]; // --- 7 floating point arithmetic operations image_d[k] = image_d[k] + 0.25 * lambda * divergence; // --- 3 floating point arithmetic operations } else { return; } } // REDUCTION __global__ void summation(unsigned char *image_d, float *sum_d, float *sum2_d, int height, int width, int pixelWidth) { __shared__ float seg_sum[2 * 16]; int globalThreadId = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadId = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; int length = height * width * pixelWidth; if((start + threadId) < length) { seg_sum[threadId] = image_d[start + threadId]; } else { seg_sum[threadId] = 0.0; } if((start + blockDim.x + threadId) < length) { seg_sum[blockDim.x + threadId] = image_d[start + blockDim.x + threadId]; } else { seg_sum[blockDim.x + threadId] = 0.0; } for(unsigned int stage = blockDim.x; stage > 0; stage /= 2) { __syncthreads(); if(threadId < stage) { seg_sum[threadId] += seg_sum[threadId + stage]; } __syncthreads(); if(threadId == 0 && (globalThreadId * 2) < length){ sum_d[blockIdx.x] = seg_sum[threadId]; sum2_d[blockIdx.x] = seg_sum[threadId]*seg_sum[threadId]; } } } int main(int argc, char *argv[]) { // Part I: allocate and initialize variables double time_0, time_1, time_2, time_3, time_4, time_5, time_6, time_7, time_8; // time variables time_0 = get_time(); const char *filename = "input.pgm"; const char *outputname = "output.png"; int width, height, pixelWidth, n_pixels; int n_iter = 50; float lambda = 0.5; float mean, variance, std_dev; //local region statistics float *north_deriv, *south_deriv, *west_deriv, *east_deriv; // directional derivatives float *north_deriv_d, *south_deriv_d, *west_deriv_d, *east_deriv_d; // derivatives in the device float tmp, sum, sum2; // calculation variables float *sum_d; // variable in the device float *sum2_d; // variable in the device float gradient_square, laplacian, num, den, std_dev2, divergence; // calculation variables float *diff_coef; // diffusion coefficient float *diff_coef_d; //coeffcent in the 
device float diff_coef_north, diff_coef_south, diff_coef_west, diff_coef_east; // directional diffusion coefficients long k; // current pixel index unsigned char *image_d; time_1 = get_time(); // Part II: parse command line arguments if(argc<2) { printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]); return(-1); } for(int ac=1;ac<argc;ac++) { if(MATCH("-i")) { filename = argv[++ac]; } else if(MATCH("-iter")) { n_iter = atoi(argv[++ac]); } else if(MATCH("-l")) { lambda = atof(argv[++ac]); } else if(MATCH("-o")) { outputname = argv[++ac]; } else { printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]); return(-1); } } time_2 = get_time(); // Part III: read image printf("Reading image...\n"); unsigned char *image = stbi_load(filename, &width, &height, &pixelWidth, 0); if (!image) { fprintf(stderr, "Couldn't load image.\n"); return (-1); } printf("Image Read. Width : %d, Height : %d, nComp: %d\n",width,height,pixelWidth); n_pixels = height * width; time_3 = get_time(); // Part IV: allocate variables north_deriv = (float*) malloc(sizeof(float) * n_pixels); // north derivative south_deriv = (float*) malloc(sizeof(float) * n_pixels); // south derivative west_deriv = (float*) malloc(sizeof(float) * n_pixels); // west derivative east_deriv = (float*) malloc(sizeof(float) * n_pixels); // east derivative diff_coef = (float*) malloc(sizeof(float) * n_pixels); // diffusion coefficient // Allocations hipMalloc((void**)&sum_d, sizeof(float)); hipMalloc((void**)&sum2_d, sizeof(float)); hipMalloc((void**)&north_deriv_d, sizeof(float)*n_pixels); hipMalloc((void**)&south_deriv_d, sizeof(float)*n_pixels); hipMalloc((void**)&east_deriv_d, sizeof(float)*n_pixels); hipMalloc((void**)&west_deriv_d, sizeof(float)*n_pixels); hipMalloc((void**)&image_d, (sizeof(unsigned char)*n_pixels) * pixelWidth); hipMalloc((void**)&diff_coef_d, sizeof(float)*n_pixels); time_4 = get_time(); // Memory Copying to the device hipMemcpy((void**)sum_d, &sum, sizeof(float), hipMemcpyHostToDevice); hipMemcpy((void**)sum2_d, &sum2, sizeof(float), hipMemcpyHostToDevice); hipMemcpy((void**)north_deriv_d, north_deriv, sizeof(float)*n_pixels, hipMemcpyHostToDevice); hipMemcpy((void**)south_deriv_d, south_deriv, sizeof(float)*n_pixels, hipMemcpyHostToDevice); hipMemcpy((void**)east_deriv_d, east_deriv, sizeof(float)*n_pixels, hipMemcpyHostToDevice); hipMemcpy((void**)west_deriv_d, west_deriv, sizeof(float)*n_pixels, hipMemcpyHostToDevice); hipMemcpy((void**)image_d, image, (sizeof(unsigned char)*n_pixels) * pixelWidth, hipMemcpyHostToDevice); hipMemcpy((void**)diff_coef_d, diff_coef, sizeof(float)*n_pixels, hipMemcpyHostToDevice); // setup execution configurations, creating 2D threads dim3 threads(16, 16, 1); dim3 grid(height/threads.x, width/threads.y); // Part V: compute --- n_iter * (3 * height * width + 42 * (height-1) * (width-1) + 6) floating point arithmetic operations in totaL for (int iter = 0; iter < n_iter; iter++) { sum = 0; sum2 = 0; // REDUCTION AND STATISTICS hipLaunchKernelGGL(( summation), dim3(grid), dim3(threads), 0, 0, image_d, sum_d, sum2_d, height, width, pixelWidth); hipDeviceSynchronize(); // Memory Copying to the host hipMemcpy(&sum,sum_d,sizeof(float), hipMemcpyDeviceToHost); hipMemcpy(&sum2,sum2_d,sizeof(float), hipMemcpyDeviceToHost); mean = sum / n_pixels; // --- 1 floating point arithmetic operations variance = (sum2 / n_pixels) - mean * mean; // --- 3 floating point arithmetic operations std_dev = variance / (mean * mean); 
// --- 2 floating point arithmetic operations // COMPUTE 1 hipLaunchKernelGGL(( compute1), dim3(grid),dim3(threads), 0, 0, height-1, width-1, k, image_d, north_deriv_d, south_deriv_d, west_deriv_d, east_deriv_d, gradient_square, laplacian, num, den, std_dev, std_dev2, diff_coef_d); hipDeviceSynchronize(); // COMPUTE 2 hipLaunchKernelGGL(( compute2), dim3(grid),dim3(threads), 0, 0, height -1, width -1, k, image_d, lambda, diff_coef_north, diff_coef_south, diff_coef_west, diff_coef_east, divergence, diff_coef_d, north_deriv_d, south_deriv_d, west_deriv_d, east_deriv_d); // Memory Copying Output Image to the host hipMemcpy(image,image_d,sizeof(unsigned char)*n_pixels * pixelWidth, hipMemcpyDeviceToHost); } time_5 = get_time(); // Part VI: write image to file stbi_write_png(outputname, width, height, pixelWidth, image, 0); time_6 = get_time(); // Part VII: get average of sum of pixels for testing and calculate GFLOPS // FOR VALIDATION - DO NOT PARALLELIZE float test = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { test += image[i * width + j]; } } test /= n_pixels; float gflops = (float) (n_iter * 1E-9 * (3 * height * width + 42 * (height-1) * (width-1) + 6)) / (time_5 - time_4); time_7 = get_time(); // Part VII: deallocate variables stbi_image_free(image); free(north_deriv); free(south_deriv); free(west_deriv); free(east_deriv); free(diff_coef); // Freeing the memory allocations hipFree(image_d); hipFree(north_deriv_d); hipFree(south_deriv_d); hipFree(west_deriv_d); hipFree(east_deriv_d); hipFree(sum_d); hipFree(sum2_d); hipFree(diff_coef_d); time_8 = get_time(); // print printf("Time spent in different stages of the application:\n"); printf("%9.6f s => Part I: allocate and initialize variables\n", (time_1 - time_0)); printf("%9.6f s => Part II: parse command line arguments\n", (time_2 - time_1)); printf("%9.6f s => Part III: read image\n", (time_3 - time_2)); printf("%9.6f s => Part IV: allocate variables\n", (time_4 - time_3)); printf("%9.6f s => Part V: compute\n", (time_5 - time_4)); printf("%9.6f s => Part VI: write image to file\n", (time_6 - time_5)); printf("%9.6f s => Part VII: get average of sum of pixels for testing and calculate GFLOPS\n", (time_7 - time_6)); printf("%9.6f s => Part VIII: deallocate variables\n", (time_7 - time_6)); printf("Total time: %9.6f s\n", (time_8 - time_0)); printf("Average of sum of pixels: %9.6f\n", test); printf("GFLOPS: %f\n", gflops); return 0; }
391ec203a7f58522957ef223d309570292491179.cu
/* * noise_remover_v1.cu * * This program removes noise from an image based on Speckle Reducing Anisotropic Diffusion * Y. Yu, S. Acton, Speckle reducing anisotropic diffusion, * IEEE Transactions on Image Processing 11(11)(2002) 1260-1270 <http://people.virginia.edu/~sc5nf/01097762.pdf> * Original implementation is Modified by Burak BASTEM * * This code was developed based on the Rodinia Benchmark Suite and * some changes have been made on this algorithm. * * IMPORTANT: * * The final version of this code has been developed by Mustafa SARAÇ and Mustafa Mert ÖGETÜRK * as a project of Parallel Programming (COMP 429) course. Koç University's code of ethics * can be applied to this code and liability can not be accepted for any negative situation. * Therefore, be careful when you get content from here. * * The serialized version of the noise removal algorithm was parallelized using various * methods via the CUDA library. * * The parallelization process consists of 3 steps in total and these are as follows: * - noise_remover_v1.cu (Naive implementation) * - noise_remover_v2.cu (Using Temporary Variables to Eliminate Global Memory References) * - noise_remover_v3.cu (Using Shared Memory on the GPU) * * For more detailed questions you can review our project report. * * You can also contact me at this email address: [email protected] */ #include <cuda_runtime.h> #include <cuda.h> #include <stdlib.h> #include <stdio.h> #include <math.h> #include <string.h> #include <sys/time.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" #define MATCH(s) (!strcmp(argv[ac], (s))) static const double kMicro = 1.0e-6; double get_time() { struct timeval TV; struct timezone TZ; const int RC = gettimeofday(&TV, &TZ); if(RC == -1) { printf("ERROR: Bad call to gettimeofday\n"); return(-1); } return( ((double)TV.tv_sec) + kMicro * ((double)TV.tv_usec) ); } //COMPUTE 1 // --- 32 floating point arithmetic operations per element -> 32*(height-1)*(width-1) in total __global__ void compute1(int height, int width, long k, unsigned char *image_d, float *north_deriv_d, float *south_deriv_d, float *west_deriv_d, float *east_deriv_d, float gradient_square, float laplacian, float num, float den, float std_dev, float std_dev2, float *diff_coef_d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i < height && j < width) { k = i * width + j; // position of current element north_deriv_d[k] = image_d[(i - 1) * width + j] - image_d[k]; // north derivative --- 1 floating point arithmetic operations south_deriv_d[k] = image_d[(i + 1) * width + j] - image_d[k]; // south derivative --- 1 floating point arithmetic operations west_deriv_d[k] = image_d[i * width + (j - 1)] - image_d[k]; // west derivative --- 1 floating point arithmetic operations east_deriv_d[k] = image_d[i * width + (j + 1)] - image_d[k]; // east derivative --- 1 floating point arithmetic operations gradient_square = (north_deriv_d[k] * north_deriv_d[k] + south_deriv_d[k] * south_deriv_d[k] + west_deriv_d[k] * west_deriv_d[k] + east_deriv_d[k] * east_deriv_d[k]) / (image_d[k] * image_d[k]); // 9 floating point arithmetic operations laplacian = (north_deriv_d[k] + south_deriv_d[k] + west_deriv_d[k] + east_deriv_d[k]) / image_d[k]; // 4 floating point arithmetic operations num = (0.5 * gradient_square) - ((1.0 / 16.0) * (laplacian * laplacian)); // 5 floating point arithmetic operations den = 1 + (.25 * laplacian); // 2 floating point arithmetic 
operations std_dev2 = num / (den * den); // 2 floating point arithmetic operations den = (std_dev2 - std_dev) / (std_dev * (1 + std_dev)); // 4 floating point arithmetic operations diff_coef_d[k] = 1.0 / (1.0 + den); // 2 floating point arithmetic operations if (diff_coef_d[k] < 0) { diff_coef_d[k] = 0; } else if (diff_coef_d[k] > 1) { diff_coef_d[k] = 1; } } else { return; } } // COMPUTE 2 // divergence and image update --- 10 floating point arithmetic operations per element -> 10*(height-1)*(width-1) in total __global__ void compute2(int height, int width, long k, unsigned char *image_d, float lambda, float diff_coef_north, float diff_coef_south, float diff_coef_west, float diff_coef_east, float divergence, float *diff_coef_d, float *north_deriv_d, float *south_deriv_d, float *west_deriv_d, float *east_deriv_d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if(i < height && j < width) { k = i * width + j; // get position of current element diff_coef_north = diff_coef_d[k]; // north diffusion coefficient diff_coef_south = diff_coef_d[(i + 1) * width + j]; // south diffusion coefficient diff_coef_west = diff_coef_d[k]; // west diffusion coefficient diff_coef_east = diff_coef_d[i * width + (j + 1)]; // east diffusion coefficient divergence = diff_coef_north * north_deriv_d[k] + diff_coef_south * south_deriv_d[k] + diff_coef_west * west_deriv_d[k] + diff_coef_east * east_deriv_d[k]; // --- 7 floating point arithmetic operations image_d[k] = image_d[k] + 0.25 * lambda * divergence; // --- 3 floating point arithmetic operations } else { return; } } // REDUCTION __global__ void summation(unsigned char *image_d, float *sum_d, float *sum2_d, int height, int width, int pixelWidth) { __shared__ float seg_sum[2 * 16]; int globalThreadId = blockDim.x * blockIdx.x + threadIdx.x; unsigned int threadId = threadIdx.x; unsigned int start = 2 * blockIdx.x * blockDim.x; int length = height * width * pixelWidth; if((start + threadId) < length) { seg_sum[threadId] = image_d[start + threadId]; } else { seg_sum[threadId] = 0.0; } if((start + blockDim.x + threadId) < length) { seg_sum[blockDim.x + threadId] = image_d[start + blockDim.x + threadId]; } else { seg_sum[blockDim.x + threadId] = 0.0; } for(unsigned int stage = blockDim.x; stage > 0; stage /= 2) { __syncthreads(); if(threadId < stage) { seg_sum[threadId] += seg_sum[threadId + stage]; } __syncthreads(); if(threadId == 0 && (globalThreadId * 2) < length){ sum_d[blockIdx.x] = seg_sum[threadId]; sum2_d[blockIdx.x] = seg_sum[threadId]*seg_sum[threadId]; } } } int main(int argc, char *argv[]) { // Part I: allocate and initialize variables double time_0, time_1, time_2, time_3, time_4, time_5, time_6, time_7, time_8; // time variables time_0 = get_time(); const char *filename = "input.pgm"; const char *outputname = "output.png"; int width, height, pixelWidth, n_pixels; int n_iter = 50; float lambda = 0.5; float mean, variance, std_dev; //local region statistics float *north_deriv, *south_deriv, *west_deriv, *east_deriv; // directional derivatives float *north_deriv_d, *south_deriv_d, *west_deriv_d, *east_deriv_d; // derivatives in the device float tmp, sum, sum2; // calculation variables float *sum_d; // variable in the device float *sum2_d; // variable in the device float gradient_square, laplacian, num, den, std_dev2, divergence; // calculation variables float *diff_coef; // diffusion coefficient float *diff_coef_d; //coeffıcent in the device float diff_coef_north, diff_coef_south, diff_coef_west, 
diff_coef_east; // directional diffusion coefficients long k; // current pixel index unsigned char *image_d; time_1 = get_time(); // Part II: parse command line arguments if(argc<2) { printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]); return(-1); } for(int ac=1;ac<argc;ac++) { if(MATCH("-i")) { filename = argv[++ac]; } else if(MATCH("-iter")) { n_iter = atoi(argv[++ac]); } else if(MATCH("-l")) { lambda = atof(argv[++ac]); } else if(MATCH("-o")) { outputname = argv[++ac]; } else { printf("Usage: %s [-i < filename>] [-iter <n_iter>] [-l <lambda>] [-o <outputfilename>]\n",argv[0]); return(-1); } } time_2 = get_time(); // Part III: read image printf("Reading image...\n"); unsigned char *image = stbi_load(filename, &width, &height, &pixelWidth, 0); if (!image) { fprintf(stderr, "Couldn't load image.\n"); return (-1); } printf("Image Read. Width : %d, Height : %d, nComp: %d\n",width,height,pixelWidth); n_pixels = height * width; time_3 = get_time(); // Part IV: allocate variables north_deriv = (float*) malloc(sizeof(float) * n_pixels); // north derivative south_deriv = (float*) malloc(sizeof(float) * n_pixels); // south derivative west_deriv = (float*) malloc(sizeof(float) * n_pixels); // west derivative east_deriv = (float*) malloc(sizeof(float) * n_pixels); // east derivative diff_coef = (float*) malloc(sizeof(float) * n_pixels); // diffusion coefficient // Allocations cudaMalloc((void**)&sum_d, sizeof(float)); cudaMalloc((void**)&sum2_d, sizeof(float)); cudaMalloc((void**)&north_deriv_d, sizeof(float)*n_pixels); cudaMalloc((void**)&south_deriv_d, sizeof(float)*n_pixels); cudaMalloc((void**)&east_deriv_d, sizeof(float)*n_pixels); cudaMalloc((void**)&west_deriv_d, sizeof(float)*n_pixels); cudaMalloc((void**)&image_d, (sizeof(unsigned char)*n_pixels) * pixelWidth); cudaMalloc((void**)&diff_coef_d, sizeof(float)*n_pixels); time_4 = get_time(); // Memory Copying to the device cudaMemcpy((void**)sum_d, &sum, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy((void**)sum2_d, &sum2, sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy((void**)north_deriv_d, north_deriv, sizeof(float)*n_pixels, cudaMemcpyHostToDevice); cudaMemcpy((void**)south_deriv_d, south_deriv, sizeof(float)*n_pixels, cudaMemcpyHostToDevice); cudaMemcpy((void**)east_deriv_d, east_deriv, sizeof(float)*n_pixels, cudaMemcpyHostToDevice); cudaMemcpy((void**)west_deriv_d, west_deriv, sizeof(float)*n_pixels, cudaMemcpyHostToDevice); cudaMemcpy((void**)image_d, image, (sizeof(unsigned char)*n_pixels) * pixelWidth, cudaMemcpyHostToDevice); cudaMemcpy((void**)diff_coef_d, diff_coef, sizeof(float)*n_pixels, cudaMemcpyHostToDevice); // setup execution configurations, creating 2D threads dim3 threads(16, 16, 1); dim3 grid(height/threads.x, width/threads.y); // Part V: compute --- n_iter * (3 * height * width + 42 * (height-1) * (width-1) + 6) floating point arithmetic operations in totaL for (int iter = 0; iter < n_iter; iter++) { sum = 0; sum2 = 0; // REDUCTION AND STATISTICS summation<<<grid, threads>>>(image_d, sum_d, sum2_d, height, width, pixelWidth); cudaDeviceSynchronize(); // Memory Copying to the host cudaMemcpy(&sum,sum_d,sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(&sum2,sum2_d,sizeof(float), cudaMemcpyDeviceToHost); mean = sum / n_pixels; // --- 1 floating point arithmetic operations variance = (sum2 / n_pixels) - mean * mean; // --- 3 floating point arithmetic operations std_dev = variance / (mean * mean); // --- 2 floating point arithmetic operations // COMPUTE 1 
compute1<<<grid,threads>>>(height-1, width-1, k, image_d, north_deriv_d, south_deriv_d, west_deriv_d, east_deriv_d, gradient_square, laplacian, num, den, std_dev, std_dev2, diff_coef_d); cudaDeviceSynchronize(); // COMPUTE 2 compute2<<<grid,threads>>>(height -1, width -1, k, image_d, lambda, diff_coef_north, diff_coef_south, diff_coef_west, diff_coef_east, divergence, diff_coef_d, north_deriv_d, south_deriv_d, west_deriv_d, east_deriv_d); // Memory Copying Output Image to the host cudaMemcpy(image,image_d,sizeof(unsigned char)*n_pixels * pixelWidth, cudaMemcpyDeviceToHost); } time_5 = get_time(); // Part VI: write image to file stbi_write_png(outputname, width, height, pixelWidth, image, 0); time_6 = get_time(); // Part VII: get average of sum of pixels for testing and calculate GFLOPS // FOR VALIDATION - DO NOT PARALLELIZE float test = 0; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { test += image[i * width + j]; } } test /= n_pixels; float gflops = (float) (n_iter * 1E-9 * (3 * height * width + 42 * (height-1) * (width-1) + 6)) / (time_5 - time_4); time_7 = get_time(); // Part VII: deallocate variables stbi_image_free(image); free(north_deriv); free(south_deriv); free(west_deriv); free(east_deriv); free(diff_coef); // Freeing the memory allocations cudaFree(image_d); cudaFree(north_deriv_d); cudaFree(south_deriv_d); cudaFree(west_deriv_d); cudaFree(east_deriv_d); cudaFree(sum_d); cudaFree(sum2_d); cudaFree(diff_coef_d); time_8 = get_time(); // print printf("Time spent in different stages of the application:\n"); printf("%9.6f s => Part I: allocate and initialize variables\n", (time_1 - time_0)); printf("%9.6f s => Part II: parse command line arguments\n", (time_2 - time_1)); printf("%9.6f s => Part III: read image\n", (time_3 - time_2)); printf("%9.6f s => Part IV: allocate variables\n", (time_4 - time_3)); printf("%9.6f s => Part V: compute\n", (time_5 - time_4)); printf("%9.6f s => Part VI: write image to file\n", (time_6 - time_5)); printf("%9.6f s => Part VII: get average of sum of pixels for testing and calculate GFLOPS\n", (time_7 - time_6)); printf("%9.6f s => Part VIII: deallocate variables\n", (time_7 - time_6)); printf("Total time: %9.6f s\n", (time_8 - time_0)); printf("Average of sum of pixels: %9.6f\n", test); printf("GFLOPS: %f\n", gflops); return 0; }
b0f21b1c5f4afd770387f97f562c14c0074c52d8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void addDiffuseKernel(float *data, int x, int y, float pressure)
{
    data[NX * x + y] += pressure;
}
b0f21b1c5f4afd770387f97f562c14c0074c52d8.cu
#include "includes.h"

__global__ void addDiffuseKernel(float *data, int x, int y, float pressure)
{
    data[NX * x + y] += pressure;
}
083f033fae3c55445182ce18ba27ef702a25d286.hip
// !!! This is a file automatically generated by hipify!!! #include <UnitTest++.h> #include "../CrossingDistanceHelper.hh" #include "MonteRay_CartesianGrid.hh" namespace MonteRay_CartesianGrid_crossingDistance_GPU_tests{ using namespace MonteRay; SUITE( MonteRay_CartesianGrid_crossingDistance_GPU_Tests) { #ifdef __HIPCC__ typedef singleDimRayTraceMap_t distances_t; typedef singleDimRayTraceMap_t rayTraceMap_t; using Position_t = MonteRay_CartesianGrid::Position_t; class CartesianGridTester { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTester(){ std::vector<gpuRayFloat_t> vertices{ -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; pCart = std::make_unique<MonteRay_CartesianGrid>(3, std::array<MonteRay_GridBins, 3>{ MonteRay_GridBins{vertices}, MonteRay_GridBins{vertices}, MonteRay_GridBins{vertices} } ); } }; TEST_FIXTURE(CartesianGridTester, CrossingDistance_in_1D_PosXDir ) { Position_t position ( -9.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 1.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 1.0, distances.dist(1), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, CrossingDistance_in_1D_NegXDir ) { Position_t position ( -8.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 1.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.0, distances.dist(1), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Outside_negSide_negDir ) { Position_t position ( -10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 0, distances.size() ); } TEST_FIXTURE(CartesianGridTester, Outside_posSide_posDir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 0, distances.size() ); } TEST_FIXTURE(CartesianGridTester, Outside_negSide_posDir ) { Position_t position ( -10.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Outside_posSide_negDir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 20, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 18, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, 
Crossing_entire_grid_starting_outside_finish_outside_pos_dir ) { Position_t position ( -10.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 21.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 22, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 2.5, distances.dist(2), 1e-6 ); CHECK_EQUAL( 17, distances.id(18) ); CHECK_CLOSE( 18.5, distances.dist(18), 1e-6 ); CHECK_EQUAL( 18, distances.id(19) ); CHECK_CLOSE( 19.5, distances.dist(19), 1e-6 ); CHECK_EQUAL( 19, distances.id(20) ); CHECK_CLOSE( 20.5, distances.dist(20), 1e-6 ); CHECK_EQUAL( 20, distances.id(21) ); CHECK_CLOSE( 21.0, distances.dist(21), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Crossing_entire_grid_starting_outside_finish_outside_neg_dir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 21.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 22, distances.size() ); CHECK_EQUAL( 20, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 18, distances.id(2) ); CHECK_CLOSE( 2.5, distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(18) ); CHECK_CLOSE( 18.5, distances.dist(18), 1e-6 ); CHECK_EQUAL( 1, distances.id(19) ); CHECK_CLOSE( 19.5, distances.dist(19), 1e-6 ); CHECK_EQUAL( 0, distances.id(20) ); CHECK_CLOSE( 20.5, distances.dist(20), 1e-6 ); CHECK_EQUAL( -1, distances.id(21) ); CHECK_CLOSE( 21.0, distances.dist(21), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Inside_cross_out_negDir ) { Position_t position ( -8.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( -1, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Inside_cross_out_posDir ) { Position_t position ( 8.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 18, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 20, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } class CartesianGridTesterTwo { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTesterTwo(){ pCart = std::make_unique<MonteRay_CartesianGrid>(3, MonteRay_GridBins{-1, 1, 2}, MonteRay_GridBins{-1, 1, 2}, MonteRay_GridBins{-1, 1, 2} ); } }; TEST_FIXTURE(CartesianGridTesterTwo, crossingDistance_2D_internal_hit_corner_posXDir_posYDir ) { Position_t position ( -.5, -.5, -.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 1.0*std::sqrt(2.0); unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, 
distances.id(0) ); CHECK_CLOSE( (0.5)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( (0.5)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); } class CartesianGridTesterThree { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTesterThree(){ pCart = std::make_unique<MonteRay_CartesianGrid>(3, MonteRay_GridBins{0, 3, 3}, MonteRay_GridBins{0, 3, 3}, MonteRay_GridBins{0, 3, 3} ); } }; TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_on_an_external_corner_posX_posY ) { Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_on_an_external_corner_negX_negY ) { Position_t position ( 3.0, 3.0, 0.5 ); Position_t direction( -1.0, -1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), 
distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_outside_on_an_external_corner_posX_posY ) { Position_t position ( -1.0, -1.0, 0.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_outside_an_external_corner_negX_negY ) { Position_t position ( 4.0, 4.0, 0.5 ); Position_t direction( -1.0, -1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } #endif } }
083f033fae3c55445182ce18ba27ef702a25d286.cu
#include <UnitTest++.h> #include "../CrossingDistanceHelper.hh" #include "MonteRay_CartesianGrid.hh" namespace MonteRay_CartesianGrid_crossingDistance_GPU_tests{ using namespace MonteRay; SUITE( MonteRay_CartesianGrid_crossingDistance_GPU_Tests) { #ifdef __CUDACC__ typedef singleDimRayTraceMap_t distances_t; typedef singleDimRayTraceMap_t rayTraceMap_t; using Position_t = MonteRay_CartesianGrid::Position_t; class CartesianGridTester { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTester(){ std::vector<gpuRayFloat_t> vertices{ -10, -9, -8, -7, -6, -5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}; pCart = std::make_unique<MonteRay_CartesianGrid>(3, std::array<MonteRay_GridBins, 3>{ MonteRay_GridBins{vertices}, MonteRay_GridBins{vertices}, MonteRay_GridBins{vertices} } ); } }; TEST_FIXTURE(CartesianGridTester, CrossingDistance_in_1D_PosXDir ) { Position_t position ( -9.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 1.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( 1.0, distances.dist(1), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, CrossingDistance_in_1D_NegXDir ) { Position_t position ( -8.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 1.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.0, distances.dist(1), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Outside_negSide_negDir ) { Position_t position ( -10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 0, distances.size() ); } TEST_FIXTURE(CartesianGridTester, Outside_posSide_posDir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 0, distances.size() ); } TEST_FIXTURE(CartesianGridTester, Outside_negSide_posDir ) { Position_t position ( -10.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Outside_posSide_negDir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 20, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 18, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Crossing_entire_grid_starting_outside_finish_outside_pos_dir ) { Position_t position ( -10.5, 0.5, 
0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 21.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 22, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( 2.5, distances.dist(2), 1e-6 ); CHECK_EQUAL( 17, distances.id(18) ); CHECK_CLOSE( 18.5, distances.dist(18), 1e-6 ); CHECK_EQUAL( 18, distances.id(19) ); CHECK_CLOSE( 19.5, distances.dist(19), 1e-6 ); CHECK_EQUAL( 19, distances.id(20) ); CHECK_CLOSE( 20.5, distances.dist(20), 1e-6 ); CHECK_EQUAL( 20, distances.id(21) ); CHECK_CLOSE( 21.0, distances.dist(21), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Crossing_entire_grid_starting_outside_finish_outside_neg_dir ) { Position_t position ( 10.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 21.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 22, distances.size() ); CHECK_EQUAL( 20, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 18, distances.id(2) ); CHECK_CLOSE( 2.5, distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(18) ); CHECK_CLOSE( 18.5, distances.dist(18), 1e-6 ); CHECK_EQUAL( 1, distances.id(19) ); CHECK_CLOSE( 19.5, distances.dist(19), 1e-6 ); CHECK_EQUAL( 0, distances.id(20) ); CHECK_CLOSE( 20.5, distances.dist(20), 1e-6 ); CHECK_EQUAL( -1, distances.id(21) ); CHECK_CLOSE( 21.0, distances.dist(21), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Inside_cross_out_negDir ) { Position_t position ( -8.5, 0.5, 0.5 ); Position_t direction( -1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 1, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( -1, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } TEST_FIXTURE(CartesianGridTester, Inside_cross_out_posDir ) { Position_t position ( 8.5, 0.5, 0.5 ); Position_t direction( 1, 0, 0 ); gpuRayFloat_t distance = 2.0; distances_t distances = crossingDistance(pCart.get(), 0, position[0], direction[0], distance); CHECK_EQUAL( 3, distances.size() ); CHECK_EQUAL( 18, distances.id(0) ); CHECK_CLOSE( 0.5, distances.dist(0), 1e-6 ); CHECK_EQUAL( 19, distances.id(1) ); CHECK_CLOSE( 1.5, distances.dist(1), 1e-6 ); CHECK_EQUAL( 20, distances.id(2) ); CHECK_CLOSE( 2.0, distances.dist(2), 1e-6 ); } class CartesianGridTesterTwo { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTesterTwo(){ pCart = std::make_unique<MonteRay_CartesianGrid>(3, MonteRay_GridBins{-1, 1, 2}, MonteRay_GridBins{-1, 1, 2}, MonteRay_GridBins{-1, 1, 2} ); } }; TEST_FIXTURE(CartesianGridTesterTwo, crossingDistance_2D_internal_hit_corner_posXDir_posYDir ) { Position_t position ( -.5, -.5, -.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 1.0*std::sqrt(2.0); unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( (0.5)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, 
distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 2, distances.size() ); CHECK_EQUAL( 0, distances.id(0) ); CHECK_CLOSE( (0.5)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 1, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); } class CartesianGridTesterThree { public: std::unique_ptr<MonteRay_CartesianGrid> pCart; CartesianGridTesterThree(){ pCart = std::make_unique<MonteRay_CartesianGrid>(3, MonteRay_GridBins{0, 3, 3}, MonteRay_GridBins{0, 3, 3}, MonteRay_GridBins{0, 3, 3} ); } }; TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_on_an_external_corner_posX_posY ) { Position_t position ( 0.0, 0.0, 0.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_on_an_external_corner_negX_negY ) { Position_t position ( 3.0, 3.0, 0.5 ); Position_t direction( -1.0, -1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (0.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), 
distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_outside_on_an_external_corner_posX_posY ) { Position_t position ( -1.0, -1.0, 0.5 ); Position_t direction( 1.0, 1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( -1, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 0, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 2, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( 3, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } TEST_FIXTURE(CartesianGridTesterThree, crossingDistance_2D_start_outside_an_external_corner_negX_negY ) { Position_t position ( 4.0, 4.0, 0.5 ); Position_t direction( -1.0, -1.0, 0.0 ); direction.normalize(); gpuRayFloat_t distance = 10.0; unsigned dim = 0; distances_t distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); dim = 1; distances.clear(); distances = crossingDistance(pCart.get(), dim, position[dim], direction[dim], distance); CHECK_EQUAL( 5, distances.size() ); CHECK_EQUAL( 3, distances.id(0) ); CHECK_CLOSE( (1.0)*std::sqrt(2.0), distances.dist(0), 1e-6 ); CHECK_EQUAL( 2, distances.id(1) ); CHECK_CLOSE( (2.0)*std::sqrt(2.0), distances.dist(1), 1e-6 ); CHECK_EQUAL( 1, distances.id(2) ); CHECK_CLOSE( (3.0)*std::sqrt(2.0), distances.dist(2), 1e-6 ); CHECK_EQUAL( 0, distances.id(3) ); CHECK_CLOSE( (4.0)*std::sqrt(2.0), distances.dist(3), 1e-6 ); CHECK_EQUAL( -1, distances.id(4) ); CHECK_CLOSE( 10.0, distances.dist(4), 1e-6 ); } #endif } }
8052d821a475eee93ced6f625597065c40b977a1.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <string> #include <fstream> #include <algorithm> // CUDA include #ifdef __HIPCC__ #include "device_launch_parameters.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include "hip/hip_runtime.h" #endif // OPENGL include #include <GL/glut.h> #include <GL/freeglut.h> #include "flow.h" // STIM include #include <stim/visualization/gl_aaboundingbox.h> #include <stim/parser/arguments.h> #include <stim/visualization/camera.h> #include <stim/visualization/colormap.h> #include <stim/cuda/cudatools/error.h> #include <stim/grids/image_stack.h> //********************parameter setting******************** // overall parameters std::string units; // units used in this program int vX, vY; float dx, dy, dz; // x, y and z image scaling(units/pixel) std::string stackdir = ""; // directory where image stacks will be stored stim::arglist args; // create an instance of arglist stim::gl_aaboundingbox<float> bb; // axis-aligned bounding box object stim::camera cam; // camera object unsigned num_edge; // number of edges in the network unsigned num_vertex; // number of vertex in the network std::vector<unsigned> pendant_vertex; // list of pendant vertex index in GT std::vector<std::string> menu_option = { "simulation", "build inlet/outlet", "manufacture", "adjustment" }; stim::flow<float> flow; // flow object stim::flow<float> backup; // flow backup float move_pace; // camera moving parameter float u; // viscosity float rou; // density float max_v; float min_v; int mods; // special keyboard input std::vector<unsigned char> color; // velocity color map std::vector<int> velocity_bar; // velocity bar float length = 40.0f; // cuboid length float scale = 1.0f; // scale factor bool image_stack = false; // flag indicates an image stack been loaded stim::image_stack<unsigned char, float> S; // image stack float binary_threshold = 128; // threshold for binary transformation float in = 0.0f; // total input volume flow rate float out = 0.0f; float Rt = 0.0f; // total resistance float Qn = 0.0f; // required input total volume flow rate GLint dlist; // simulation display list bool undo = false; // delete display list // hard-coded parameters float camera_factor = 1.2f; // start point of the camera as a function of X and Y size float orbit_factor = 0.01f; // degrees per pixel used to orbit the camera float zoom_factor = 10.0f; // zooming factor float border_factor = 20.0f; // border float radii_factor = 1.0f; // radii changing factor GLint subdivision = 20; // slices and stacks float default_radius = 5.0f; // default radii of network vertex float delta = 0.01f; // small discrepancy float eps = 20.0f; // epsilon threshold float max_pressure = 0.0f; // maximum pressure that the channel can bear float height_threshold = 100.0f; // connection height constraint float fragment_ratio = 0.0f; // fragment ratio // glut event parameters int mouse_x; // window x-coordinate int mouse_y; // window y-coordinate int picked_x; // picked window x-coordinate int picked_y; // picked window y-coordinate bool LTbutton = false; // true means down while false means up // simulation parameters bool render_direction = false; // flag indicates rendering flow direction for one edge bool simulation = false; // flag indicates simulation mode bool color_bound = false; // flag indicates velocity color map bound bool to_select_pressure = false; // flag indicates having selected a vertex to modify pressure bool mark_index = true; // flag indicates marking the 
index near the vertex bool glyph_mode = false; // flag indicates rendering glyph for flow velocity field bool frame_mode = false; // flag indicates rendering filament framing structrues bool subdivided = false; // flag indicates subdivision status unsigned pressure_index; // the index of vertex that is clicked unsigned direction_index = UINT_MAX;// the index of edge that is pointed at std::vector<stim::vec3<float> > back_vertex; // vertex back up for marking indices // build inlet/outlet parameters bool build_inlet_outlet = false; // flag indicates building inlets and outlets bool modified_bridge = false; // flag indicates having modified inlet/outlet connection bool hilbert_curve = false; // flag indicates enabling hilbert curves constructions bool change_fragment = false; // flag indicates changing fragment for square wave connections bool picked_connection = false; // flag indicates picked one connection bool render_new_connection = false; // flag indicates rendering new line connection in trasparency bool redisplay = false; // flag indicates redisplay rendering bool connection_done = false; // flag indicates finishing connections bool render_flow_rate = false; // flag indicates rendering total volume flow rate unsigned connection_index = UINT_MAX;// the index of connection that is picked unsigned port_index = 0; // inlet (0) or outlet (1) stim::vec3<float> tmp_v1, tmp_v2; // temp vertex int coef; // computational coefficient factor // manufacture parameters bool manufacture = false; // flag indicates manufacture mode //********************helper function********************* // get the network basic information inline void get_background() { pendant_vertex = flow.get_pendant_vertex(); num_edge = flow.edges(); num_vertex = flow.vertices(); // set the initial radii flow.init(num_edge, num_vertex); // initialize flow object // if no radius information laoded if (!flow.get_radius(0, 0)) for (unsigned i = 0; i < num_edge; i++) flow.set_r(i, default_radius); } // convert from window coordinates to world coordinates inline void window_to_world(GLdouble &x, GLdouble &y, GLdouble &z) { GLint viewport[4]; GLdouble modelview[16]; GLdouble projection[16]; GLdouble winX, winY; GLfloat winZ; glGetIntegerv(GL_VIEWPORT, viewport); glGetDoublev(GL_MODELVIEW_MATRIX, modelview); glGetDoublev(GL_PROJECTION_MATRIX, projection); winX = (GLdouble)mouse_x; winY = viewport[3] - (GLdouble)mouse_y; glReadPixels((GLint)winX, (GLint)winY, (GLsizei)1, (GLsizei)1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ); gluUnProject(winX, winY, winZ, modelview, projection, viewport, &x, &y, &z); } // convert current image stack into a binary mask #ifdef __HIPCC__ template <typename T, typename F> __global__ void binary_transform(size_t N, T* ptr, F threshold) { size_t ix = blockDim.x * blockIdx.x + threadIdx.x; if (ix >= N) return; // avoid seg-fault if (ptr[ix] >= threshold) // binary transformation ptr[ix] = 0; else ptr[ix] = 255; } #endif //********************simulation function********************** // initialize flow object void flow_initialize() { flow.set = true; stim::vec3<float> center = bb.center(); flow.P.clear(); flow.P.resize(num_vertex, 0); // clear up initialized pressure for (unsigned i = 0; i < pendant_vertex.size(); i++) { if (flow.get_vertex(pendant_vertex[i])[0] <= center[0]) flow.P[pendant_vertex[i]] = max_pressure - i * delta; // should set minor discrepancy else flow.P[pendant_vertex[i]] = (i + 1) * delta; // algorithm treat 0 as no initial pressure } } // find the stable flow state void flow_stable_state() { 
flow.solve_flow(u); flow.get_color_map(max_v, min_v, color, pendant_vertex); color_bound = true; velocity_bar.resize(num_edge); for (unsigned i = 0; i < num_edge; i++) velocity_bar[i] = i; std::sort(velocity_bar.begin(), velocity_bar.end(), [&](int x, int y) {return abs(flow.v[x]) < abs(flow.v[y]); }); } // adjustment on input volume flow rate and corresponding flow simulation void adjustment() { system("CLS"); // clear up console box std::cout << "Please enter the input total volume flow rate: " << std::endl; std::cin >> Qn; flow.adjust(in, out, Rt, Qn, u); } //********************glut function******************** // dynamically set menu // @param num: number of current menu options // @param range: range of option to be set from menu_option list void glut_set_menu(int num, int range) { // remove last time menu options for (int i = 1; i < num + 1; i++) glutRemoveMenuItem(1); // set new menu options std::string menu_name; for (int i = 1; i < range + 1; i++) { menu_name = menu_option[i - 1]; glutAddMenuEntry(menu_name.c_str(), i); } } // set up the squash transform to whole screen void glut_projection() { glMatrixMode(GL_PROJECTION); // load the projection matrix for editing glLoadIdentity(); // start with the identity matrix vX = glutGet(GLUT_WINDOW_WIDTH); // use the whole screen for rendering vY = glutGet(GLUT_WINDOW_HEIGHT); glViewport(0, 0, vX, vY); // specify a viewport for the entire window float aspect = (float)vX / (float)vY; // calculate the aspect ratio gluPerspective(60, aspect, 0.1, 1000000); // set up a perspective projection } // translate camera to origin void glut_modelview() { glMatrixMode(GL_MODELVIEW); // load the modelview matrix for editing glLoadIdentity(); // start with the identity matrix stim::vec3<float> eye = cam.getPosition(); // get the camera position (eye point) stim::vec3<float> focus = cam.getLookAt(); // get the camera focal point stim::vec3<float> up = cam.getUp(); // get the camera "up" orientation gluLookAt(eye[0], eye[1], eye[2], focus[0], focus[1], focus[2], up[0], up[1], up[2]); // set up the OpenGL camera } // glut render function void glut_render() { glEnable(GL_DEPTH_TEST); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(1.0f, 1.0f, 1.0f, 1.0f); glut_projection(); glut_modelview(); if (!simulation && !build_inlet_outlet || manufacture) { glColor3f(0.0f, 0.0f, 0.0f); flow.glCylinder0(scale, undo); } else { flow.bounding_box(); // bounding box if (num_vertex > 100) { // if the network is big enough (say 100), use display list if (undo) { // undo rendering list undo = false; glDeleteLists(dlist, 1); } if (!glIsList(dlist)) { dlist = glGenLists(1); glNewList(dlist, GL_COMPILE); // render network if (!glyph_mode) { flow.glSolidSphere(max_pressure, subdivision, scale); if (mark_index) flow.mark_vertex(back_vertex, scale); //flow.glSolidCone(subdivision); flow.glSolidCylinder(direction_index, color, subdivision, scale); } // render glyphs else flow.glyph(color, subdivision, scale, frame_mode); glEndList(); } glCallList(dlist); } else { // small network // render network if (!glyph_mode) { flow.glSolidSphere(max_pressure, subdivision, scale); if (mark_index) { flow.mark_vertex(back_vertex, scale); //flow.mark_edge(); } //flow.glSolidCone(subdivision); flow.glSolidCylinder(direction_index, color, subdivision, scale); } // render glyphs else flow.glyph(color, subdivision, scale, frame_mode); } flow.glSolidCuboid(subdivision, manufacture, length); // render bus source if (render_direction && !glyph_mode) // render the flow direction of the 
vertex pointed flow.glSolidCone(direction_index, subdivision, scale); } if (build_inlet_outlet) flow.line_bridge(redisplay); if (manufacture) { flow.glSolidCuboid(subdivision, manufacture, length); flow.tube_bridge(redisplay, subdivision, scale); } if (picked_connection && render_new_connection) { glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glColor4f(0.0f, 0.0f, 0.0f, 0.4f); glBegin(GL_LINE_STRIP); if (!port_index) { glVertex3f(flow.inlet[connection_index].V[1][0], flow.inlet[connection_index].V[1][1], flow.inlet[connection_index].V[1][2]); glVertex3f(tmp_v1[0], tmp_v1[1], tmp_v1[2]); glVertex3f(tmp_v2[0], tmp_v2[1], tmp_v2[2]); glVertex3f(flow.inlet[connection_index].V[2][0], flow.inlet[connection_index].V[2][1], flow.inlet[connection_index].V[2][2]); } else { glVertex3f(flow.outlet[connection_index].V[1][0], flow.outlet[connection_index].V[1][1], flow.outlet[connection_index].V[1][2]); glVertex3f(tmp_v1[0], tmp_v1[1], tmp_v1[2]); glVertex3f(tmp_v2[0], tmp_v2[1], tmp_v2[2]); glVertex3f(flow.outlet[connection_index].V[2][0], flow.outlet[connection_index].V[2][1], flow.outlet[connection_index].V[2][2]); } glEnd(); glFlush(); glDisable(GL_BLEND); } // render bars // bring up a pressure bar on the left if (to_select_pressure) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define orthogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(0.0, 0.0, 1.0); // blue to red glVertex2f(border_factor, border_factor); glColor3f(1.0, 0.0, 0.0); glVertex2f(border_factor, (vY - 2.0f * border_factor)); glEnd(); glFlush(); // pressure bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - border_factor); std::stringstream ss_p; ss_p << "Pressure Bar"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // pressure range text float step = vY - 3.0f * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f((border_factor * 1.5f), (border_factor + i * step)); std::stringstream ss_n; ss_n << (float)i * max_pressure / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } // bring up a velocity bar on the left if ((simulation || build_inlet_outlet) && !to_select_pressure && !change_fragment) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define orthogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); float step = (vY - 3 * border_factor); step /= BREWER_CTRL_PTS - 1; for (unsigned i = 0; i < BREWER_CTRL_PTS - 1; i++) { glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(BREWERCP[i * 4 + 0], BREWERCP[i * 4 + 1], BREWERCP[i * 4 + 2]); glVertex2f(border_factor, border_factor + i * step); glColor3f(BREWERCP[(i + 1) * 4 + 0], BREWERCP[(i + 1) * 4 + 1], BREWERCP[(i + 1) * 4 + 2]); glVertex2f(border_factor, border_factor + (i + 1) * step); glEnd(); } glFlush(); // velocity 
bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - border_factor); std::stringstream ss_p; ss_p << "Velocity range"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // velocity range text step = vY - 3 * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f(border_factor * 1.5f, border_factor + i * step); std::stringstream ss_n; ss_n << min_v + i * (max_v - min_v) / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } // bring up a ratio bar on the left if (change_fragment) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define orthogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(0.0, 0.0, 1.0); // blue to red glVertex2f(border_factor, border_factor); glColor3f(1.0, 0.0, 0.0); glVertex2f(border_factor, (vY - 2.0f * border_factor)); glEnd(); glFlush(); // ratio bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - border_factor); std::stringstream ss_p; ss_p << "Ratio bar"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // ratio range text float step = vY - 3.0f * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f((border_factor * 1.5f), (border_factor + i * step)); std::stringstream ss_n; ss_n << (float)i * 1.0f / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } if (build_inlet_outlet) if (render_flow_rate) flow.display_flow_rate(in, out); glutSwapBuffers(); } // register glut menu options void glut_menu(int value) { int num = glutGet(GLUT_MENU_NUM_ITEMS); if (value == 1) { simulation = true; build_inlet_outlet = false; render_flow_rate = false; manufacture = false; modified_bridge = false; change_fragment = false; connection_done = false; // first time if (!flow.set) { // only first time simulation called "simulation", ^_^ get_background(); // get the graph information back_vertex = flow.back_vertex(); // vertex back up for marking indices flow_initialize(); // initialize flow condition menu_option[0] = "resimulation"; } // simulation / resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); if (!glyph_mode) glut_set_menu(num, 2); } if (value == 2) { simulation = false; build_inlet_outlet = true; manufacture = false; if (!modified_bridge && !connection_done) { flow.set_main_feeder(); flow.build_synthetic_connection(u, default_radius); flow.check_direct_connection(); // check whether direct connections intersect each other connection_done = true; } else if (modified_bridge) { modified_bridge = false; redisplay = true; flow.clear_synthetic_connection(); } glut_set_menu(num, 4); } if (value == 3) { simulation = false; build_inlet_outlet = false; manufacture = true; glyph_mode = false; // manufacture mode doesn't need flow direction redisplay = true; } if (value == 4) { simulation = true; build_inlet_outlet = false; render_flow_rate = false; manufacture = false; adjustment(); // adjust network flow accordingly glut_set_menu(num, 1); } 
glutPostRedisplay(); } // defines camera motion based on mouse dragging void glut_motion(int x, int y) { mods = glutGetModifiers(); if (LTbutton && mods == 0) { float theta = orbit_factor * (mouse_x - x); // determine the number of degrees along the x-axis to rotate float phi = orbit_factor * (y - mouse_y); // number of degrees along the y-axis to rotate cam.OrbitFocus(theta, phi); // rotate the camera around the focal point } mouse_x = x; // update the mouse position mouse_y = y; glutPostRedisplay(); // re-draw the visualization } // defines passive mouse motion function void glut_passive_motion(int x, int y) { mods = glutGetModifiers(); // check whether the mouse point near to an edge GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates if (simulation || build_inlet_outlet && !mods) { bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, direction_index); if (flag && !glyph_mode) render_direction = true; else if (!flag && !glyph_mode) { if (render_direction) // if the direction is displaying currently, do a short delay Sleep(300); render_direction = false; direction_index = -1; } undo = true; } if (mods == GLUT_ACTIVE_SHIFT && picked_connection) { render_new_connection = true; size_t i; if (!port_index) { tmp_v1 = stim::vec3<float>(flow.inlet[connection_index].V[1][0], flow.inlet[connection_index].V[1][1] + (float)(picked_y - y), flow.inlet[connection_index].V[1][2]); tmp_v2 = stim::vec3<float>(flow.inlet[connection_index].V[2][0], flow.inlet[connection_index].V[2][1] + (float)(picked_y - y), flow.inlet[connection_index].V[2][2]); i = flow.inlet[connection_index].V.size(); if (coef * tmp_v1[1] < coef * flow.inlet[connection_index].V[i - 1][1]) { tmp_v1[1] = flow.inlet[connection_index].V[i - 1][1]; tmp_v2[1] = flow.inlet[connection_index].V[i - 1][1]; } } else { tmp_v1 = stim::vec3<float>(flow.outlet[connection_index].V[1][0], flow.outlet[connection_index].V[1][1] + (float)(picked_y - y), flow.outlet[connection_index].V[1][2]); tmp_v2 = stim::vec3<float>(flow.outlet[connection_index].V[2][0], flow.outlet[connection_index].V[2][1] + (float)(picked_y - y), flow.outlet[connection_index].V[2][2]); i = flow.outlet[connection_index].V.size(); if (coef * tmp_v1[1] < coef * flow.outlet[connection_index].V[i - 1][1]) { tmp_v1[1] = flow.outlet[connection_index].V[i - 1][1]; tmp_v2[1] = flow.outlet[connection_index].V[i - 1][1]; } } } else if (mods == GLUT_ACTIVE_CTRL && picked_connection) { render_new_connection = true; if (!port_index) { tmp_v1 = stim::vec3<float>(flow.inlet[connection_index].V[0][0] + (float)(x - picked_x), flow.inlet[connection_index].V[0][1], flow.inlet[connection_index].V[0][2]); tmp_v2 = stim::vec3<float>(flow.inlet[connection_index].V[1][0] + (float)(x - picked_x), flow.inlet[connection_index].V[1][1], flow.inlet[connection_index].V[1][2]); if (tmp_v1[0] < flow.main_feeder[port_index][0] - length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] - length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] - length / 2; } else if (tmp_v1[0] > flow.main_feeder[port_index][0] + length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] + length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] + length / 2; } } else { tmp_v1 = stim::vec3<float>(flow.outlet[connection_index].V[0][0] + (float)(x - picked_x), flow.outlet[connection_index].V[0][1], flow.outlet[connection_index].V[0][2]); tmp_v2 = stim::vec3<float>(flow.outlet[connection_index].V[1][0] + (float)(x - picked_x), flow.outlet[connection_index].V[1][1], 
flow.outlet[connection_index].V[1][2]); if (tmp_v1[0] > flow.main_feeder[port_index][0] + length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] + length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] + length / 2; } else if (tmp_v1[0] < flow.main_feeder[port_index][0] - length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] - length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] - length / 2; } } } else render_new_connection = false; mouse_x = x; mouse_y = y; glutPostRedisplay(); // re-draw the visualization } // get click window coordinates void glut_mouse(int button, int state, int x, int y) { mods = glutGetModifiers(); // get special keyboard input mouse_x = x; mouse_y = y; if (!mods) { picked_connection = false; render_new_connection = false; } if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) LTbutton = true; else if (button == GLUT_LEFT_BUTTON && state == GLUT_UP) LTbutton = false; if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && simulation && !to_select_pressure) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_vertex((float)posX, (float)posY, (float)posZ, eps, scale, pressure_index); if (flag) { std::vector<unsigned>::iterator it = std::find(pendant_vertex.begin(), pendant_vertex.end(), pressure_index); if (it != pendant_vertex.end()) // if it is dangle vertex to_select_pressure = true; } } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && simulation && to_select_pressure) { if (y >= 2 * border_factor && y <= vY - border_factor) { // within the pressure bar range to_select_pressure = false; float tmp_pressure = (float)(vY - y - border_factor) / ((float)vY - 3.0f * border_factor) * max_pressure; flow.set_pressure(pressure_index, tmp_pressure); //flow_stable_state(); // main function of solving the linear system //flow.print_flow(); } } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && modified_bridge && change_fragment) { if (y >= 2 * border_factor && y <= vY - border_factor) // within the ratio bar range fragment_ratio = (float)(vY - y - border_factor) / ((float)vY - 3.0f * border_factor) * 1.0f; else if (y < 2 * border_factor) fragment_ratio = 1.0f; else if (y > vY - border_factor) fragment_ratio = 0.0f; change_fragment = false; render_flow_rate = true; flow.modify_synthetic_connection(u, rou, hilbert_curve, height_threshold, in, out, 2, fragment_ratio, default_radius); } // move connections along y-axis else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_SHIFT && !modified_bridge && !picked_connection) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, connection_index, port_index); if (flag) { picked_connection = true; picked_x = x; picked_y = y; if (!port_index) if (flow.inlet[connection_index].V[2][1] > flow.main_feeder[port_index][1]) coef = 1; else coef = -1; else if (flow.outlet[connection_index].V[2][1] > flow.main_feeder[port_index][1]) coef = 1; else coef = -1; } else picked_connection = false; } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_SHIFT && !modified_bridge && render_new_connection) { float l = 0.0f; std::vector<typename stim::vec3<float> > V; size_t i; if (!port_index) { i = flow.inlet[connection_index].V.size(); if (tmp_v2[1] != flow.inlet[connection_index].V[i - 1][1]) { V.resize(4); V[0] = flow.inlet[connection_index].V[0]; V[1] = tmp_v1; V[2] 
= tmp_v2; V[3] = flow.inlet[connection_index].V[i - 1]; std::swap(flow.inlet[connection_index].V, V); } else { V.resize(3); V[0] = flow.inlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; std::swap(flow.inlet[connection_index].V, V); } // calculate new length for (unsigned i = 0; i < flow.inlet[connection_index].V.size() - 1; i++) { l += (flow.inlet[connection_index].V[i + 1] - flow.inlet[connection_index].V[i]).len(); } flow.inlet[connection_index].l = l; } else { i = flow.outlet[connection_index].V.size(); if (tmp_v2[1] != flow.outlet[connection_index].V[i - 1][1]) { V.resize(4); V[0] = flow.outlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; V[3] = flow.outlet[connection_index].V[i - 1]; std::swap(flow.outlet[connection_index].V, V); } else { V.resize(3); V[0] = flow.outlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; std::swap(flow.outlet[connection_index].V, V); } // calculate new length for (unsigned i = 0; i < flow.outlet[connection_index].V.size() - 1; i++) { l += (flow.outlet[connection_index].V[i + 1] - flow.outlet[connection_index].V[i]).len(); } flow.outlet[connection_index].l = l; } redisplay = true; render_new_connection = false; picked_connection = false; flow.check_direct_connection(); flow.backup(); // back up direct synthetic connections } // move connections along x-axis else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_CTRL && !modified_bridge && !picked_connection) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, connection_index, port_index); if (flag) { picked_connection = true; picked_x = x; picked_y = y; if (!port_index) coef = 1; else coef = -1; } else picked_connection = false; } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_CTRL && !modified_bridge && render_new_connection) { float l = 0.0f; if (!port_index) { flow.inlet[connection_index].V[0] = tmp_v1; flow.inlet[connection_index].V[1] = tmp_v2; // calculate new length for (unsigned i = 0; i < flow.inlet[connection_index].V.size() - 1; i++) { l += (flow.inlet[connection_index].V[i + 1] - flow.inlet[connection_index].V[i]).len(); } flow.inlet[connection_index].l = l; } else { flow.outlet[connection_index].V[0] = tmp_v1; flow.outlet[connection_index].V[1] = tmp_v2; // calculate new length for (unsigned i = 0; i < flow.outlet[connection_index].V.size() - 1; i++) { l += (flow.outlet[connection_index].V[i + 1] - flow.outlet[connection_index].V[i]).len(); } flow.outlet[connection_index].l = l; } redisplay = true; render_new_connection = false; picked_connection = false; flow.check_direct_connection(); flow.backup(); } } // define camera move based on mouse wheel move void glut_wheel(int wheel, int direction, int x, int y) { mods = glutGetModifiers(); mouse_x = x; mouse_y = y; GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates if (!to_select_pressure && (simulation || build_inlet_outlet || manufacture)) { // check current pixel position only in simualtion and build_inlet_outlet modes bool flag = flow.epsilon_vertex((float)posX, (float)posY, (float)posZ, eps, scale, pressure_index); if (flag && simulation && !glyph_mode) { float tmp_r; if (direction > 0) { // increase radii tmp_r = flow.get_radius(pressure_index); tmp_r += radii_factor; } else { tmp_r = flow.get_radius(pressure_index); tmp_r -= radii_factor; if (tmp_r <= 0) tmp_r = default_radius; } 
flow.set_radius(pressure_index, tmp_r); undo = true; // undo rendering } else if (!mods) { if (direction > 0) // if it is button 3(up), move closer move_pace = zoom_factor; else // if it is button 4(down), leave farther move_pace = -zoom_factor; cam.Push(move_pace); } } // rescale if (mods == GLUT_ACTIVE_CTRL) { if (direction > 0) { if (scale >= 1) scale += 1.0f; else scale += 0.1f; } else { if (scale > 1) scale -= 1.0f; else if (scale <= 1 && scale > 0.1f) scale -= 0.1f; else scale = 1.0f; } undo = true; redisplay = true; } glutPostRedisplay(); } // define keyboard inputs void glut_keyboard(unsigned char key, int x, int y) { // register different keyboard operation switch (key) { // saving network flow profile case 's': flow.save_network(); break; // convert network to binary format (.nwt) case 'c': { std::vector<std::string> tmp = stim::parser::split(args.arg(0), '.'); std::stringstream ss; ss << tmp[0] << ".nwt"; std::string filename = ss.str(); flow.saveNwt(filename); break; } // subdivide current network for more detailed calculation case 'd': { // subdivide current network due to the limitation of current computation if needed if (!subdivided && simulation && !glyph_mode) { subdivided = true; // check whether current network can be subdivided if (flow.check_subdivision()) { flow.subdivision(); get_background(); } flow_initialize(); // initialize flow condition // resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); undo = true; } else if (subdivided && simulation && !glyph_mode) { subdivided = false; flow = backup; // load back up get_background(); flow_initialize(); // resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); undo = true; } break; } // flow vector field visualization, Glyphs case 'f': if (glyph_mode && !manufacture && (simulation || build_inlet_outlet)) { glyph_mode = false; frame_mode = false; redisplay = true; // lines and arrows rendering use the same display list int num = glutGet(GLUT_MENU_NUM_ITEMS); if (num == 1) glut_set_menu(num, 2); } else if (!glyph_mode && !manufacture && (simulation || build_inlet_outlet)) { glyph_mode = true; redisplay = true; int num = glutGet(GLUT_MENU_NUM_ITEMS); if (num == 2) glut_set_menu(num, 1); } undo = true; break; // filaments around arrows case 'g': if (glyph_mode) { if (frame_mode) frame_mode = false; else frame_mode = true; } undo = true; break; // open/close index marks case 'e': if (mark_index) mark_index = false; else mark_index = true; undo = true; break; // output image stack case 'm': if (manufacture) { #ifdef __HIPCC__ flow.make_image_stack(S, dx, dy, dz, stackdir, image_stack, default_radius, scale); #else std::cout << "You need to have a gpu to make image stack, sorry." 
<< std::endl; #endif } else if (build_inlet_outlet && !modified_bridge) { modified_bridge = true; if (hilbert_curve) flow.modify_synthetic_connection(u, rou, hilbert_curve, height_threshold, in, out, 2, fragment_ratio); else change_fragment = true; } break; } glutPostRedisplay(); } // glut initialization void glut_initialize() { int myargc = 1; char* myargv[1]; myargv[0] = strdup("generate_network_network"); glutInit(&myargc, myargv); glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA); glutInitWindowPosition(100, 100); // set the initial window position glutInitWindowSize(1000, 1000); glutCreateWindow("3D flow simulation"); glutDisplayFunc(glut_render); glutMouseFunc(glut_mouse); glutMotionFunc(glut_motion); glutPassiveMotionFunc(glut_passive_motion); glutMouseWheelFunc(glut_wheel); glutKeyboardFunc(glut_keyboard); glutCreateMenu(glut_menu); // create a menu object glut_set_menu(0, 1); glutAttachMenu(GLUT_RIGHT_BUTTON); // register right mouse to open menu option stim::vec3<float> c = bb.center(); // get the center of the network bounding box // place the camera along the z-axis at a distance determined by the network size along x and y cam.setPosition(c + stim::vec<float>(0, 0, camera_factor * ::max(bb.size()[0], bb.size()[1]))); cam.LookAt(c[0], c[1], c[2]); } // output an advertisement for the lab, authors and usage information void advertise() { std::cout << std::endl << std::endl; std::cout << " =======================================================================================" << std::endl; std::cout << "|Thank you for using the AFAN tool! |" << std::endl; std::cout << "|Scalable Tissue Imaging and Modeling (STIM) Lab, University of Houston |" << std::endl; std::cout << "|Developers: Jiaming Guo, David Mayerich |" << std::endl; std::cout << "|Source: https://git.stim.ee.uh.edu/Jack/flow3.git |" << std::endl; std::cout << " =======================================================================================" << std::endl << std::endl; std::cout << "Usage(keyboard): e -> open/close indexing" << std::endl; std::cout << " m -> build synthetic connections(connection mode)/output augmented network as image stack (manufacture mode)" << std::endl; std::cout << " s -> save network flow profiles in profile folder as cvs files" << std::endl; std::cout << " c -> convert .obj network to .nwt network and stores in main folder" << std::endl; std::cout << " f -> open/close vector field visualization mode" << std::endl; std::cout << " g -> render filament frames in vector fiedl visualization mode" << std::endl; std::cout << args.str(); } // main function: parse arguments and initialize GLUT int main(int argc, char* argv[]) { // add arguments args.add("help", "prints the help"); args.add("units", "string indicating units of length for output measurements (ex. 
velocity)", "um", "text string"); args.add("maxpress", "maximum allowed pressure in g / units / s^2, default 2 is for blood when units = um", "2", "real value > 0"); args.add("viscosity", "set the viscosity of the fluid (in g / units / s), default .00001 is for blood when units = um", ".00001", "real value > 0"); args.add("rou", "set the density of the fluid (in g / units^3), default 1.06*10^-12 is for blood when units = um", ".00000000000106", "real value > 0"); args.add("hilbert", "activate hilbert curves connections"); args.add("stack", "load the image stack"); args.add("stackres", "spacing between pixel samples in each dimension(in units/pixel)", "1 1 1", "real value > 0"); args.add("stackdir", "set the directory of the output image stack", "", "any existing directory (ex. /home/name/network)"); args.add("scale", "scale down rendering fibers"); args.add("lcc", "extract the largest connected component"); args.parse(argc, argv); // parse the command line if (args["help"].is_set()) { advertise(); std::exit(1); } // load network if (args.nargs() == 0) { std::cout << "Network file required." << std::endl; return 1; } else { // load network from user std::vector<std::string> tmp = stim::parser::split(args.arg(0), '.'); if ("obj" == tmp[1]) { flow.load_obj(args.arg(0)); backup.load_obj(args.arg(0)); } else if ("nwt" == tmp[1]) { // stim network binary format flow.loadNwt(args.arg(0)); backup.loadNwt(args.arg(0)); } else if ("swc" == tmp[1]) { flow.load_swc(args.arg(0)); backup.load_swc(args.arg(0)); } else { std::cout << "Invalid file type" << std::endl; std::exit(1); } } // extract the largest connected component // get the units to work on units = args["units"].as_string(); flow.set_units(units); // blood pressure in capillaries range from 15 - 35 torr // 1 torr = 133.3 Pa max_pressure = (float)args["maxpress"].as_float(); // normal blood viscosity range from 4 - 15 mPas(cP) // 1 Pas = 1 g / mm / s u = (float)args["viscosity"].as_float(); // g / units / s // normally the blood density in capillaries: 1060 kg/m^3 = 1.06*10^-12 g/um^3 rou = (float)args["rou"].as_float(); // check whether to enable hilbert curves or not hilbert_curve = args["hilbert"].is_set(); // load image stack if provided if (args["stack"].is_set()) { image_stack = true; S.load_images(args["stack"].as_string()); // binary transformation #ifdef __HIPCC__ size_t N = S.samples(); // number of pixels loaded unsigned char* d_S; // image stack stored in device unsigned char* h_S = (unsigned char*)malloc(N * sizeof(unsigned char)); // image stack stored in host hipMalloc((void**)&d_S, N * sizeof(unsigned char)); hipMemcpy(d_S, S.data(), N * sizeof(unsigned char), hipMemcpyHostToDevice); size_t thread = 1024; size_t block = N / thread + 1; hipLaunchKernelGGL(( binary_transform) , dim3(block), dim3(thread), 0, 0, N, d_S, binary_threshold); // binarization hipMemcpy(h_S, d_S, N * sizeof(unsigned char), hipMemcpyDeviceToHost); S.copy(h_S); #endif } // get the voxel and image stack size dx = (float)args["stackres"].as_float(0); dy = (float)args["stackres"].as_float(1); dz = (float)args["stackres"].as_float(2); // get the save directory of image stack if (args["stackdir"].is_set()) stackdir = args["stackdir"].as_string(); // get the scale-down factor if provided if (args["scale"].is_set()) scale = (float)args["scale"].as_float(); // glut main loop bb = flow.boundingbox(); glut_initialize(); glutMainLoop(); }
8052d821a475eee93ced6f625597065c40b977a1.cu
#include <stdlib.h> #include <string> #include <fstream> #include <algorithm> // CUDA include #ifdef __CUDACC__ #include "device_launch_parameters.h" #include <cuda.h> #include <cuda_runtime_api.h> #include "cuda_runtime.h" #endif // OPENGL include #include <GL/glut.h> #include <GL/freeglut.h> #include "flow.h" // STIM include #include <stim/visualization/gl_aaboundingbox.h> #include <stim/parser/arguments.h> #include <stim/visualization/camera.h> #include <stim/visualization/colormap.h> #include <stim/cuda/cudatools/error.h> #include <stim/grids/image_stack.h> //********************parameter setting******************** // overall parameters std::string units; // units used in this program int vX, vY; float dx, dy, dz; // x, y and z image scaling(units/pixel) std::string stackdir = ""; // directory where image stacks will be stored stim::arglist args; // create an instance of arglist stim::gl_aaboundingbox<float> bb; // axis-aligned bounding box object stim::camera cam; // camera object unsigned num_edge; // number of edges in the network unsigned num_vertex; // number of vertex in the network std::vector<unsigned> pendant_vertex; // list of pendant vertex index in GT std::vector<std::string> menu_option = { "simulation", "build inlet/outlet", "manufacture", "adjustment" }; stim::flow<float> flow; // flow object stim::flow<float> backup; // flow backup float move_pace; // camera moving parameter float u; // viscosity float rou; // density float max_v; float min_v; int mods; // special keyboard input std::vector<unsigned char> color; // velocity color map std::vector<int> velocity_bar; // velocity bar float length = 40.0f; // cuboid length float scale = 1.0f; // scale factor bool image_stack = false; // flag indicates an image stack been loaded stim::image_stack<unsigned char, float> S; // image stack float binary_threshold = 128; // threshold for binary transformation float in = 0.0f; // total input volume flow rate float out = 0.0f; float Rt = 0.0f; // total resistance float Qn = 0.0f; // required input total volume flow rate GLint dlist; // simulation display list bool undo = false; // delete display list // hard-coded parameters float camera_factor = 1.2f; // start point of the camera as a function of X and Y size float orbit_factor = 0.01f; // degrees per pixel used to orbit the camera float zoom_factor = 10.0f; // zooming factor float border_factor = 20.0f; // border float radii_factor = 1.0f; // radii changing factor GLint subdivision = 20; // slices and stacks float default_radius = 5.0f; // default radii of network vertex float delta = 0.01f; // small discrepancy float eps = 20.0f; // epsilon threshold float max_pressure = 0.0f; // maximum pressure that the channel can bear float height_threshold = 100.0f; // connection height constraint float fragment_ratio = 0.0f; // fragment ratio // glut event parameters int mouse_x; // window x-coordinate int mouse_y; // window y-coordinate int picked_x; // picked window x-coordinate int picked_y; // picked window y-coordinate bool LTbutton = false; // true means down while false means up // simulation parameters bool render_direction = false; // flag indicates rendering flow direction for one edge bool simulation = false; // flag indicates simulation mode bool color_bound = false; // flag indicates velocity color map bound bool to_select_pressure = false; // flag indicates having selected a vertex to modify pressure bool mark_index = true; // flag indicates marking the index near the vertex bool glyph_mode = false; // flag indicates rendering 
glyph for flow velocity field bool frame_mode = false; // flag indicates rendering filament framing structrues bool subdivided = false; // flag indicates subdivision status unsigned pressure_index; // the index of vertex that is clicked unsigned direction_index = UINT_MAX;// the index of edge that is pointed at std::vector<stim::vec3<float> > back_vertex; // vertex back up for marking indices // build inlet/outlet parameters bool build_inlet_outlet = false; // flag indicates building inlets and outlets bool modified_bridge = false; // flag indicates having modified inlet/outlet connection bool hilbert_curve = false; // flag indicates enabling hilbert curves constructions bool change_fragment = false; // flag indicates changing fragment for square wave connections bool picked_connection = false; // flag indicates picked one connection bool render_new_connection = false; // flag indicates rendering new line connection in trasparency bool redisplay = false; // flag indicates redisplay rendering bool connection_done = false; // flag indicates finishing connections bool render_flow_rate = false; // flag indicates rendering total volume flow rate unsigned connection_index = UINT_MAX;// the index of connection that is picked unsigned port_index = 0; // inlet (0) or outlet (1) stim::vec3<float> tmp_v1, tmp_v2; // temp vertex int coef; // computational coefficient factor // manufacture parameters bool manufacture = false; // flag indicates manufacture mode //********************helper function********************* // get the network basic information inline void get_background() { pendant_vertex = flow.get_pendant_vertex(); num_edge = flow.edges(); num_vertex = flow.vertices(); // set the initial radii flow.init(num_edge, num_vertex); // initialize flow object // if no radius information laoded if (!flow.get_radius(0, 0)) for (unsigned i = 0; i < num_edge; i++) flow.set_r(i, default_radius); } // convert from window coordinates to world coordinates inline void window_to_world(GLdouble &x, GLdouble &y, GLdouble &z) { GLint viewport[4]; GLdouble modelview[16]; GLdouble projection[16]; GLdouble winX, winY; GLfloat winZ; glGetIntegerv(GL_VIEWPORT, viewport); glGetDoublev(GL_MODELVIEW_MATRIX, modelview); glGetDoublev(GL_PROJECTION_MATRIX, projection); winX = (GLdouble)mouse_x; winY = viewport[3] - (GLdouble)mouse_y; glReadPixels((GLint)winX, (GLint)winY, (GLsizei)1, (GLsizei)1, GL_DEPTH_COMPONENT, GL_FLOAT, &winZ); gluUnProject(winX, winY, winZ, modelview, projection, viewport, &x, &y, &z); } // convert current image stack into a binary mask #ifdef __CUDACC__ template <typename T, typename F> __global__ void binary_transform(size_t N, T* ptr, F threshold) { size_t ix = blockDim.x * blockIdx.x + threadIdx.x; if (ix >= N) return; // avoid seg-fault if (ptr[ix] >= threshold) // binary transformation ptr[ix] = 0; else ptr[ix] = 255; } #endif //********************simulation function********************** // initialize flow object void flow_initialize() { flow.set = true; stim::vec3<float> center = bb.center(); flow.P.clear(); flow.P.resize(num_vertex, 0); // clear up initialized pressure for (unsigned i = 0; i < pendant_vertex.size(); i++) { if (flow.get_vertex(pendant_vertex[i])[0] <= center[0]) flow.P[pendant_vertex[i]] = max_pressure - i * delta; // should set minor discrepancy else flow.P[pendant_vertex[i]] = (i + 1) * delta; // algorithm treat 0 as no initial pressure } } // find the stable flow state void flow_stable_state() { flow.solve_flow(u); flow.get_color_map(max_v, min_v, color, 
pendant_vertex); color_bound = true; velocity_bar.resize(num_edge); for (unsigned i = 0; i < num_edge; i++) velocity_bar[i] = i; std::sort(velocity_bar.begin(), velocity_bar.end(), [&](int x, int y) {return abs(flow.v[x]) < abs(flow.v[y]); }); } // adjustment on input volume flow rate and corresponding flow simulation void adjustment() { system("CLS"); // clear up console box std::cout << "Please enter the input total volume flow rate: " << std::endl; std::cin >> Qn; flow.adjust(in, out, Rt, Qn, u); } //********************glut function******************** // dynamically set menu // @param num: number of current menu options // @param range: range of option to be set from menu_option list void glut_set_menu(int num, int range) { // remove last time menu options for (int i = 1; i < num + 1; i++) glutRemoveMenuItem(1); // set new menu options std::string menu_name; for (int i = 1; i < range + 1; i++) { menu_name = menu_option[i - 1]; glutAddMenuEntry(menu_name.c_str(), i); } } // set up the squash transform to whole screen void glut_projection() { glMatrixMode(GL_PROJECTION); // load the projection matrix for editing glLoadIdentity(); // start with the identity matrix vX = glutGet(GLUT_WINDOW_WIDTH); // use the whole screen for rendering vY = glutGet(GLUT_WINDOW_HEIGHT); glViewport(0, 0, vX, vY); // specify a viewport for the entire window float aspect = (float)vX / (float)vY; // calculate the aspect ratio gluPerspective(60, aspect, 0.1, 1000000); // set up a perspective projection } // translate camera to origin void glut_modelview() { glMatrixMode(GL_MODELVIEW); // load the modelview matrix for editing glLoadIdentity(); // start with the identity matrix stim::vec3<float> eye = cam.getPosition(); // get the camera position (eye point) stim::vec3<float> focus = cam.getLookAt(); // get the camera focal point stim::vec3<float> up = cam.getUp(); // get the camera "up" orientation gluLookAt(eye[0], eye[1], eye[2], focus[0], focus[1], focus[2], up[0], up[1], up[2]); // set up the OpenGL camera } // glut render function void glut_render() { glEnable(GL_DEPTH_TEST); glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glClearColor(1.0f, 1.0f, 1.0f, 1.0f); glut_projection(); glut_modelview(); if (!simulation && !build_inlet_outlet || manufacture) { glColor3f(0.0f, 0.0f, 0.0f); flow.glCylinder0(scale, undo); } else { flow.bounding_box(); // bounding box if (num_vertex > 100) { // if the network is big enough (say 100), use display list if (undo) { // undo rendering list undo = false; glDeleteLists(dlist, 1); } if (!glIsList(dlist)) { dlist = glGenLists(1); glNewList(dlist, GL_COMPILE); // render network if (!glyph_mode) { flow.glSolidSphere(max_pressure, subdivision, scale); if (mark_index) flow.mark_vertex(back_vertex, scale); //flow.glSolidCone(subdivision); flow.glSolidCylinder(direction_index, color, subdivision, scale); } // render glyphs else flow.glyph(color, subdivision, scale, frame_mode); glEndList(); } glCallList(dlist); } else { // small network // render network if (!glyph_mode) { flow.glSolidSphere(max_pressure, subdivision, scale); if (mark_index) { flow.mark_vertex(back_vertex, scale); //flow.mark_edge(); } //flow.glSolidCone(subdivision); flow.glSolidCylinder(direction_index, color, subdivision, scale); } // render glyphs else flow.glyph(color, subdivision, scale, frame_mode); } flow.glSolidCuboid(subdivision, manufacture, length); // render bus source if (render_direction && !glyph_mode) // render the flow direction of the vertex pointed flow.glSolidCone(direction_index, subdivision, 
scale); } if (build_inlet_outlet) flow.line_bridge(redisplay); if (manufacture) { flow.glSolidCuboid(subdivision, manufacture, length); flow.tube_bridge(redisplay, subdivision, scale); } if (picked_connection && render_new_connection) { glEnable(GL_BLEND); glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); glColor4f(0.0f, 0.0f, 0.0f, 0.4f); glBegin(GL_LINE_STRIP); if (!port_index) { glVertex3f(flow.inlet[connection_index].V[1][0], flow.inlet[connection_index].V[1][1], flow.inlet[connection_index].V[1][2]); glVertex3f(tmp_v1[0], tmp_v1[1], tmp_v1[2]); glVertex3f(tmp_v2[0], tmp_v2[1], tmp_v2[2]); glVertex3f(flow.inlet[connection_index].V[2][0], flow.inlet[connection_index].V[2][1], flow.inlet[connection_index].V[2][2]); } else { glVertex3f(flow.outlet[connection_index].V[1][0], flow.outlet[connection_index].V[1][1], flow.outlet[connection_index].V[1][2]); glVertex3f(tmp_v1[0], tmp_v1[1], tmp_v1[2]); glVertex3f(tmp_v2[0], tmp_v2[1], tmp_v2[2]); glVertex3f(flow.outlet[connection_index].V[2][0], flow.outlet[connection_index].V[2][1], flow.outlet[connection_index].V[2][2]); } glEnd(); glFlush(); glDisable(GL_BLEND); } // render bars // bring up a pressure bar on left if (to_select_pressure) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define othogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(0.0, 0.0, 1.0); // blue to red glVertex2f(border_factor, border_factor); glColor3f(1.0, 0.0, 0.0); glVertex2f(border_factor, (vY - 2.0f * border_factor)); glEnd(); glFlush(); // pressure bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - border_factor); std::stringstream ss_p; ss_p << "Pressure Bar"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // pressure range text float step = vY - 3.0f * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f((border_factor * 1.5f), (border_factor + i * step)); std::stringstream ss_n; ss_n << (float)i * max_pressure / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } // bring up a velocity bar on left if ((simulation || build_inlet_outlet) && !to_select_pressure && !change_fragment) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define othogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); float step = (vY - 3 * border_factor); step /= BREWER_CTRL_PTS - 1; for (unsigned i = 0; i < BREWER_CTRL_PTS - 1; i++) { glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(BREWERCP[i * 4 + 0], BREWERCP[i * 4 + 1], BREWERCP[i * 4 + 2]); glVertex2f(border_factor, border_factor + i * step); glColor3f(BREWERCP[(i + 1) * 4 + 0], BREWERCP[(i + 1) * 4 + 1], BREWERCP[(i + 1) * 4 + 2]); glVertex2f(border_factor, border_factor + (i + 1) * step); glEnd(); } glFlush(); // pressure bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - 
border_factor); std::stringstream ss_p; ss_p << "Velocity range"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // pressure range text step = vY - 3 * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f(border_factor * 1.5f, border_factor + i * step); std::stringstream ss_n; ss_n << min_v + i * (max_v - min_v) / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } // bring up a ratio bar on the left if (change_fragment) { glMatrixMode(GL_PROJECTION); // set up the 2d viewport for mode text printing glPushMatrix(); glLoadIdentity(); vX = glutGet(GLUT_WINDOW_WIDTH); // get the current window width vY = glutGet(GLUT_WINDOW_HEIGHT); // get the current window height glViewport(0, 0, vX, vY); // locate to left bottom corner gluOrtho2D(0, vX, 0, vY); // define othogonal aspect glMatrixMode(GL_MODELVIEW); glPushMatrix(); glLoadIdentity(); glLineWidth(border_factor); glBegin(GL_LINES); glColor3f(0.0, 0.0, 1.0); // blue to red glVertex2f(border_factor, border_factor); glColor3f(1.0, 0.0, 0.0); glVertex2f(border_factor, (vY - 2.0f * border_factor)); glEnd(); glFlush(); // ratio bar text glColor3f(0.0f, 0.0f, 0.0f); glRasterPos2f(0.0f, vY - border_factor); std::stringstream ss_p; ss_p << "Ratio bar"; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_p.str().c_str())); // ratio range text float step = vY - 3.0f * border_factor; step /= 10; for (unsigned i = 0; i < 11; i++) { glRasterPos2f((border_factor * 1.5f), (border_factor + i * step)); std::stringstream ss_n; ss_n << (float)i * 1.0f / 10; glutBitmapString(GLUT_BITMAP_HELVETICA_18, (const unsigned char*)(ss_n.str().c_str())); } glPopMatrix(); glMatrixMode(GL_PROJECTION); glPopMatrix(); } if (build_inlet_outlet) if (render_flow_rate) flow.display_flow_rate(in, out); glutSwapBuffers(); } // register glut menu options void glut_menu(int value) { int num = glutGet(GLUT_MENU_NUM_ITEMS); if (value == 1) { simulation = true; build_inlet_outlet = false; render_flow_rate = false; manufacture = false; modified_bridge = false; change_fragment = false; connection_done = false; // first time if (!flow.set) { // only first time simulation called "simulation", ^_^ get_background(); // get the graph information back_vertex = flow.back_vertex(); // vertex back up for marking indices flow_initialize(); // initialize flow condition menu_option[0] = "resimulation"; } // simulation / resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); if (!glyph_mode) glut_set_menu(num, 2); } if (value == 2) { simulation = false; build_inlet_outlet = true; manufacture = false; if (!modified_bridge && !connection_done) { flow.set_main_feeder(); flow.build_synthetic_connection(u, default_radius); flow.check_direct_connection(); // check whether direct connections intersect each other connection_done = true; } else if (modified_bridge) { modified_bridge = false; redisplay = true; flow.clear_synthetic_connection(); } glut_set_menu(num, 4); } if (value == 3) { simulation = false; build_inlet_outlet = false; manufacture = true; glyph_mode = false; // manufacuture mode doesn't need flow direction redisplay = true; } if (value == 4) { simulation = true; build_inlet_outlet = false; render_flow_rate = false; manufacture = false; adjustment(); // adjust network flow accordingly glut_set_menu(num, 1); } glutPostRedisplay(); } // defines camera motion based on mouse 
dragging void glut_motion(int x, int y) { mods = glutGetModifiers(); if (LTbutton && mods == 0) { float theta = orbit_factor * (mouse_x - x); // determine the number of degrees along the x-axis to rotate float phi = orbit_factor * (y - mouse_y); // number of degrees along the y-axis to rotate cam.OrbitFocus(theta, phi); // rotate the camera around the focal point } mouse_x = x; // update the mouse position mouse_y = y; glutPostRedisplay(); // re-draw the visualization } // defines passive mouse motion function void glut_passive_motion(int x, int y) { mods = glutGetModifiers(); // check whether the mouse point near to an edge GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates if (simulation || build_inlet_outlet && !mods) { bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, direction_index); if (flag && !glyph_mode) render_direction = true; else if (!flag && !glyph_mode) { if (render_direction) // if the direction is displaying currently, do a short delay Sleep(300); render_direction = false; direction_index = -1; } undo = true; } if (mods == GLUT_ACTIVE_SHIFT && picked_connection) { render_new_connection = true; size_t i; if (!port_index) { tmp_v1 = stim::vec3<float>(flow.inlet[connection_index].V[1][0], flow.inlet[connection_index].V[1][1] + (float)(picked_y - y), flow.inlet[connection_index].V[1][2]); tmp_v2 = stim::vec3<float>(flow.inlet[connection_index].V[2][0], flow.inlet[connection_index].V[2][1] + (float)(picked_y - y), flow.inlet[connection_index].V[2][2]); i = flow.inlet[connection_index].V.size(); if (coef * tmp_v1[1] < coef * flow.inlet[connection_index].V[i - 1][1]) { tmp_v1[1] = flow.inlet[connection_index].V[i - 1][1]; tmp_v2[1] = flow.inlet[connection_index].V[i - 1][1]; } } else { tmp_v1 = stim::vec3<float>(flow.outlet[connection_index].V[1][0], flow.outlet[connection_index].V[1][1] + (float)(picked_y - y), flow.outlet[connection_index].V[1][2]); tmp_v2 = stim::vec3<float>(flow.outlet[connection_index].V[2][0], flow.outlet[connection_index].V[2][1] + (float)(picked_y - y), flow.outlet[connection_index].V[2][2]); i = flow.outlet[connection_index].V.size(); if (coef * tmp_v1[1] < coef * flow.outlet[connection_index].V[i - 1][1]) { tmp_v1[1] = flow.outlet[connection_index].V[i - 1][1]; tmp_v2[1] = flow.outlet[connection_index].V[i - 1][1]; } } } else if (mods == GLUT_ACTIVE_CTRL && picked_connection) { render_new_connection = true; if (!port_index) { tmp_v1 = stim::vec3<float>(flow.inlet[connection_index].V[0][0] + (float)(x - picked_x), flow.inlet[connection_index].V[0][1], flow.inlet[connection_index].V[0][2]); tmp_v2 = stim::vec3<float>(flow.inlet[connection_index].V[1][0] + (float)(x - picked_x), flow.inlet[connection_index].V[1][1], flow.inlet[connection_index].V[1][2]); if (tmp_v1[0] < flow.main_feeder[port_index][0] - length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] - length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] - length / 2; } else if (tmp_v1[0] > flow.main_feeder[port_index][0] + length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] + length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] + length / 2; } } else { tmp_v1 = stim::vec3<float>(flow.outlet[connection_index].V[0][0] + (float)(x - picked_x), flow.outlet[connection_index].V[0][1], flow.outlet[connection_index].V[0][2]); tmp_v2 = stim::vec3<float>(flow.outlet[connection_index].V[1][0] + (float)(x - picked_x), flow.outlet[connection_index].V[1][1], flow.outlet[connection_index].V[1][2]); if (tmp_v1[0] > 
flow.main_feeder[port_index][0] + length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] + length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] + length / 2; } else if (tmp_v1[0] < flow.main_feeder[port_index][0] - length / 2) { tmp_v1[0] = flow.main_feeder[port_index][0] - length / 2; tmp_v2[0] = flow.main_feeder[port_index][0] - length / 2; } } } else render_new_connection = false; mouse_x = x; mouse_y = y; glutPostRedisplay(); // re-draw the visualization } // get click window coordinates void glut_mouse(int button, int state, int x, int y) { mods = glutGetModifiers(); // get special keyboard input mouse_x = x; mouse_y = y; if (!mods) { picked_connection = false; render_new_connection = false; } if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) LTbutton = true; else if (button == GLUT_LEFT_BUTTON && state == GLUT_UP) LTbutton = false; if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && simulation && !to_select_pressure) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_vertex((float)posX, (float)posY, (float)posZ, eps, scale, pressure_index); if (flag) { std::vector<unsigned>::iterator it = std::find(pendant_vertex.begin(), pendant_vertex.end(), pressure_index); if (it != pendant_vertex.end()) // if it is dangle vertex to_select_pressure = true; } } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && simulation && to_select_pressure) { if (y >= 2 * border_factor && y <= vY - border_factor) { // within the pressure bar range to_select_pressure = false; float tmp_pressure = (float)(vY - y - border_factor) / ((float)vY - 3.0f * border_factor) * max_pressure; flow.set_pressure(pressure_index, tmp_pressure); //flow_stable_state(); // main function of solving the linear system //flow.print_flow(); } } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && !mods && modified_bridge && change_fragment) { if (y >= 2 * border_factor && y <= vY - border_factor) // within the ratio bar range fragment_ratio = (float)(vY - y - border_factor) / ((float)vY - 3.0f * border_factor) * 1.0f; else if (y < 2 * border_factor) fragment_ratio = 1.0f; else if (y > vY - border_factor) fragment_ratio = 0.0f; change_fragment = false; render_flow_rate = true; flow.modify_synthetic_connection(u, rou, hilbert_curve, height_threshold, in, out, 2, fragment_ratio, default_radius); } // move connections along y-axis else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_SHIFT && !modified_bridge && !picked_connection) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, connection_index, port_index); if (flag) { picked_connection = true; picked_x = x; picked_y = y; if (!port_index) if (flow.inlet[connection_index].V[2][1] > flow.main_feeder[port_index][1]) coef = 1; else coef = -1; else if (flow.outlet[connection_index].V[2][1] > flow.main_feeder[port_index][1]) coef = 1; else coef = -1; } else picked_connection = false; } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_SHIFT && !modified_bridge && render_new_connection) { float l = 0.0f; std::vector<typename stim::vec3<float> > V; size_t i; if (!port_index) { i = flow.inlet[connection_index].V.size(); if (tmp_v2[1] != flow.inlet[connection_index].V[i - 1][1]) { V.resize(4); V[0] = flow.inlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; V[3] = flow.inlet[connection_index].V[i - 1]; 
std::swap(flow.inlet[connection_index].V, V); } else { V.resize(3); V[0] = flow.inlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; std::swap(flow.inlet[connection_index].V, V); } // calculate new length for (unsigned i = 0; i < flow.inlet[connection_index].V.size() - 1; i++) { l += (flow.inlet[connection_index].V[i + 1] - flow.inlet[connection_index].V[i]).len(); } flow.inlet[connection_index].l = l; } else { i = flow.outlet[connection_index].V.size(); if (tmp_v2[1] != flow.outlet[connection_index].V[i - 1][1]) { V.resize(4); V[0] = flow.outlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; V[3] = flow.outlet[connection_index].V[i - 1]; std::swap(flow.outlet[connection_index].V, V); } else { V.resize(3); V[0] = flow.outlet[connection_index].V[0]; V[1] = tmp_v1; V[2] = tmp_v2; std::swap(flow.outlet[connection_index].V, V); } // calculate new length for (unsigned i = 0; i < flow.outlet[connection_index].V.size() - 1; i++) { l += (flow.outlet[connection_index].V[i + 1] - flow.outlet[connection_index].V[i]).len(); } flow.outlet[connection_index].l = l; } redisplay = true; render_new_connection = false; picked_connection = false; flow.check_direct_connection(); flow.backup(); // back up direct synthetic connections } // move connections along x-axis else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_CTRL && !modified_bridge && !picked_connection) { GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates bool flag = flow.epsilon_edge((float)posX, (float)posY, (float)posZ, eps, connection_index, port_index); if (flag) { picked_connection = true; picked_x = x; picked_y = y; if (!port_index) coef = 1; else coef = -1; } else picked_connection = false; } else if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN && mods == GLUT_ACTIVE_CTRL && !modified_bridge && render_new_connection) { float l = 0.0f; if (!port_index) { flow.inlet[connection_index].V[0] = tmp_v1; flow.inlet[connection_index].V[1] = tmp_v2; // calculate new length for (unsigned i = 0; i < flow.inlet[connection_index].V.size() - 1; i++) { l += (flow.inlet[connection_index].V[i + 1] - flow.inlet[connection_index].V[i]).len(); } flow.inlet[connection_index].l = l; } else { flow.outlet[connection_index].V[0] = tmp_v1; flow.outlet[connection_index].V[1] = tmp_v2; // calculate new length for (unsigned i = 0; i < flow.outlet[connection_index].V.size() - 1; i++) { l += (flow.outlet[connection_index].V[i + 1] - flow.outlet[connection_index].V[i]).len(); } flow.outlet[connection_index].l = l; } redisplay = true; render_new_connection = false; picked_connection = false; flow.check_direct_connection(); flow.backup(); } } // define camera move based on mouse wheel move void glut_wheel(int wheel, int direction, int x, int y) { mods = glutGetModifiers(); mouse_x = x; mouse_y = y; GLdouble posX, posY, posZ; window_to_world(posX, posY, posZ); // get the world coordinates if (!to_select_pressure && (simulation || build_inlet_outlet || manufacture)) { // check current pixel position only in simualtion and build_inlet_outlet modes bool flag = flow.epsilon_vertex((float)posX, (float)posY, (float)posZ, eps, scale, pressure_index); if (flag && simulation && !glyph_mode) { float tmp_r; if (direction > 0) { // increase radii tmp_r = flow.get_radius(pressure_index); tmp_r += radii_factor; } else { tmp_r = flow.get_radius(pressure_index); tmp_r -= radii_factor; if (tmp_r <= 0) tmp_r = default_radius; } flow.set_radius(pressure_index, tmp_r); undo = true; // undo rendering } else if 
(!mods) { if (direction > 0) // if it is button 3(up), move closer move_pace = zoom_factor; else // if it is button 4(down), leave farther move_pace = -zoom_factor; cam.Push(move_pace); } } // rescale if (mods == GLUT_ACTIVE_CTRL) { if (direction > 0) { if (scale >= 1) scale += 1.0f; else scale += 0.1f; } else { if (scale > 1) scale -= 1.0f; else if (scale <= 1 && scale > 0.1f) scale -= 0.1f; else scale = 1.0f; } undo = true; redisplay = true; } glutPostRedisplay(); } // define keyboard inputs void glut_keyboard(unsigned char key, int x, int y) { // register different keyboard operation switch (key) { // saving network flow profile case 's': flow.save_network(); break; // convert network to binary format (.nwt) case 'c': { std::vector<std::string> tmp = stim::parser::split(args.arg(0), '.'); std::stringstream ss; ss << tmp[0] << ".nwt"; std::string filename = ss.str(); flow.saveNwt(filename); break; } // subdivide current network for more detailed calculation case 'd': { // subdivide current network due to the limitation of current computation if needed if (!subdivided && simulation && !glyph_mode) { subdivided = true; // check whether current network can be subdivided if (flow.check_subdivision()) { flow.subdivision(); get_background(); } flow_initialize(); // initialize flow condition // resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); undo = true; } else if (subdivided && simulation && !glyph_mode) { subdivided = false; flow = backup; // load back up get_background(); flow_initialize(); // resimulation flow_stable_state(); // main function of solving the linear system flow.print_flow(); undo = true; } break; } // flow vector field visualization, Glyphs case 'f': if (glyph_mode && !manufacture && (simulation || build_inlet_outlet)) { glyph_mode = false; frame_mode = false; redisplay = true; // lines and arrows rendering use the same display list int num = glutGet(GLUT_MENU_NUM_ITEMS); if (num == 1) glut_set_menu(num, 2); } else if (!glyph_mode && !manufacture && (simulation || build_inlet_outlet)) { glyph_mode = true; redisplay = true; int num = glutGet(GLUT_MENU_NUM_ITEMS); if (num == 2) glut_set_menu(num, 1); } undo = true; break; // filaments around arrows case 'g': if (glyph_mode) { if (frame_mode) frame_mode = false; else frame_mode = true; } undo = true; break; // open/close index marks case 'e': if (mark_index) mark_index = false; else mark_index = true; undo = true; break; // output image stack case 'm': if (manufacture) { #ifdef __CUDACC__ flow.make_image_stack(S, dx, dy, dz, stackdir, image_stack, default_radius, scale); #else std::cout << "You need to have a gpu to make image stack, sorry." 
			<< std::endl;
#endif
		}
		else if (build_inlet_outlet && !modified_bridge) {
			modified_bridge = true;

			if (hilbert_curve)
				flow.modify_synthetic_connection(u, rou, hilbert_curve, height_threshold, in, out, 2, fragment_ratio);
			else
				change_fragment = true;
		}
		break;
	}

	glutPostRedisplay();
}


// glut initialization
void glut_initialize() {

	int myargc = 1;
	char* myargv[1];
	myargv[0] = strdup("generate_network_network");

	glutInit(&myargc, myargv);
	glutInitDisplayMode(GLUT_DEPTH | GLUT_DOUBLE | GLUT_RGBA);
	glutInitWindowPosition(100, 100);					// set the initial window position
	glutInitWindowSize(1000, 1000);
	glutCreateWindow("3D flow simulation");
	glutDisplayFunc(glut_render);
	glutMouseFunc(glut_mouse);
	glutMotionFunc(glut_motion);
	glutPassiveMotionFunc(glut_passive_motion);
	glutMouseWheelFunc(glut_wheel);
	glutKeyboardFunc(glut_keyboard);
	glutCreateMenu(glut_menu);							// create a menu object
	glut_set_menu(0, 1);
	glutAttachMenu(GLUT_RIGHT_BUTTON);					// register right mouse to open menu option

	stim::vec3<float> c = bb.center();					// get the center of the network bounding box
	// place the camera along the z-axis at a distance determined by the network size along x and y
	cam.setPosition(c + stim::vec<float>(0, 0, camera_factor * std::max(bb.size()[0], bb.size()[1])));
	cam.LookAt(c[0], c[1], c[2]);
}

// output an advertisement for the lab, authors and usage information
void advertise() {
	std::cout << std::endl << std::endl;
	std::cout << " =======================================================================================" << std::endl;
	std::cout << "|Thank you for using the AFAN tool! |" << std::endl;
	std::cout << "|Scalable Tissue Imaging and Modeling (STIM) Lab, University of Houston |" << std::endl;
	std::cout << "|Developers: Jiaming Guo, David Mayerich |" << std::endl;
	std::cout << "|Source: https://git.stim.ee.uh.edu/Jack/flow3.git |" << std::endl;
	std::cout << " =======================================================================================" << std::endl << std::endl;
	std::cout << "Usage(keyboard): e -> open/close indexing" << std::endl;
	std::cout << " m -> build synthetic connections(connection mode)/output augmented network as image stack (manufacture mode)" << std::endl;
	std::cout << " s -> save network flow profiles in profile folder as csv files" << std::endl;
	std::cout << " c -> convert .obj network to .nwt network and stores in main folder" << std::endl;
	std::cout << " f -> open/close vector field visualization mode" << std::endl;
	std::cout << " g -> render filament frames in vector field visualization mode" << std::endl;
	std::cout << args.str();
}

// main function: parse arguments and initialize GLUT
int main(int argc, char* argv[]) {

	// add arguments
	args.add("help", "prints the help");
	args.add("units", "string indicating units of length for output measurements (ex. velocity)", "um", "text string");
	args.add("maxpress", "maximum allowed pressure in g / units / s^2, default 2 is for blood when units = um", "2", "real value > 0");
	args.add("viscosity", "set the viscosity of the fluid (in g / units / s), default .00001 is for blood when units = um", ".00001", "real value > 0");
	args.add("rou", "set the density of the fluid (in g / units^3), default 1.06*10^-12 is for blood when units = um", ".00000000000106", "real value > 0");
	args.add("hilbert", "activate hilbert curves connections");
	args.add("stack", "load the image stack");
	args.add("stackres", "spacing between pixel samples in each dimension (in units/pixel)", "1 1 1", "real value > 0");
	args.add("stackdir", "set the directory of the output image stack", "", "any existing directory (ex. /home/name/network)");
	args.add("scale", "scale down rendering fibers");
	args.add("lcc", "extract the largest connected component");

	args.parse(argc, argv);								// parse the command line

	if (args["help"].is_set()) {
		advertise();
		std::exit(1);
	}

	// load network
	if (args.nargs() == 0) {
		std::cout << "Network file required." << std::endl;
		return 1;
	}
	else {												// load network from user
		std::vector<std::string> tmp = stim::parser::split(args.arg(0), '.');
		if ("obj" == tmp[1]) {
			flow.load_obj(args.arg(0));
			backup.load_obj(args.arg(0));
		}
		else if ("nwt" == tmp[1]) {						// stim network binary format
			flow.loadNwt(args.arg(0));
			backup.loadNwt(args.arg(0));
		}
		else if ("swc" == tmp[1]) {
			flow.load_swc(args.arg(0));
			backup.load_swc(args.arg(0));
		}
		else {
			std::cout << "Invalid file type" << std::endl;
			std::exit(1);
		}
	}

	// extract the largest connected component

	// get the units to work on
	units = args["units"].as_string();
	flow.set_units(units);

	// blood pressure in capillaries range from 15 - 35 torr
	// 1 torr = 133.3 Pa
	max_pressure = (float)args["maxpress"].as_float();

	// normal blood viscosity range from 4 - 15 mPa·s(cP)
	// 1 Pa·s = 1 g / mm / s
	u = (float)args["viscosity"].as_float();			// g / units / s

	// normally the blood density in capillaries: 1060 kg/m^3 = 1.06*10^-12 g/um^3
	rou = (float)args["rou"].as_float();

	// check whether to enable hilbert curves or not
	hilbert_curve = args["hilbert"].is_set();

	// load image stack if provided
	if (args["stack"].is_set()) {
		image_stack = true;
		S.load_images(args["stack"].as_string());

		// binary transformation
#ifdef __CUDACC__
		size_t N = S.samples();							// number of pixels loaded
		unsigned char* d_S;								// image stack stored in device
		unsigned char* h_S = (unsigned char*)malloc(N * sizeof(unsigned char));	// image stack stored in host
		cudaMalloc((void**)&d_S, N * sizeof(unsigned char));
		cudaMemcpy(d_S, S.data(), N * sizeof(unsigned char), cudaMemcpyHostToDevice);

		size_t thread = 1024;
		size_t block = N / thread + 1;
		binary_transform <<<block, thread>>> (N, d_S, binary_threshold);	// binarization

		cudaMemcpy(h_S, d_S, N * sizeof(unsigned char), cudaMemcpyDeviceToHost);

		S.copy(h_S);
#endif
	}

	// get the voxel and image stack size
	dx = (float)args["stackres"].as_float(0);
	dy = (float)args["stackres"].as_float(1);
	dz = (float)args["stackres"].as_float(2);

	// get the save directory of image stack
	if (args["stackdir"].is_set())
		stackdir = args["stackdir"].as_string();

	// get the scale-down factor if provided
	if (args["scale"].is_set())
		scale = (float)args["scale"].as_float();

	// glut main loop
	bb = flow.boundingbox();
	glut_initialize();
	glutMainLoop();
}
d94976a9ab400cdd1edc51a7c8237bf851cafd92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #include <quda_internal.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <face_quda.h> // this is where the MPI / QMP depdendent code is #include <hip/hip_complex.h> #define REDUCE_MAX_BLOCKS 65536 #define REDUCE_DOUBLE 64 #define REDUCE_KAHAN 32 #if (__COMPUTE_CAPABILITY__ >= 130) #define REDUCE_TYPE REDUCE_DOUBLE #define QudaSumFloat double #define QudaSumComplex hipDoubleComplex #define QudaSumFloat3 double3 #else #define REDUCE_TYPE REDUCE_KAHAN #define QudaSumFloat float #define QudaSumComplex hipComplex #define QudaSumFloat3 float3 #endif // These are used for reduction kernels static QudaSumFloat *d_reduceFloat=0; static QudaSumComplex *d_reduceComplex=0; static QudaSumFloat3 *d_reduceFloat3=0; static QudaSumFloat *h_reduceFloat=0; static QudaSumComplex *h_reduceComplex=0; static QudaSumFloat3 *h_reduceFloat3=0; namespace quda { unsigned long long blas_flops; unsigned long long blas_bytes; } static dim3 blasBlock; static dim3 blasGrid; // generated by blas_test #include <blas_param.h> double2 operator+(const double2& x, const double2 &y) { return make_double2(x.x + y.x, x.y + y.y); } double3 operator+(const double3& x, const double3 &y) { double3 z; z.x = x.x + y.x; z.y = x.y + y.y; z.z = x.z + y.z; return z; } __device__ float2 operator*(const float a, const float2 x) { float2 y; y.x = a*x.x; y.y = a*x.y; return y; } template <typename Float2> __device__ Float2 operator+(const Float2 x, const Float2 y) { Float2 z; z.x = x.x + y.x; z.y = x.y + y.y; return z; } template <typename Float2> __device__ Float2 operator+=(Float2 &x, const Float2 y) { x.x += y.x; x.y += y.y; return x; } template <typename Float2> __device__ Float2 operator-=(Float2 &x, const Float2 y) { x.x -= y.x; x.y -= y.y; return x; } template <typename Float, typename Float2> __device__ Float2 operator*=(Float2 &x, const Float a) { x.x *= a; x.y *= a; return x; } template <typename Float> __device__ float4 operator*=(float4 &a, const Float &b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; return a; } void zeroCuda(cudaColorSpinorField &a) { a.zero(); } // blasTuning = 1 turns off error checking static QudaTune blasTuning = QUDA_TUNE_NO; namespace quda { void initBlas(void) { if (!d_reduceFloat) { if (hipMalloc((void**) &d_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == hipErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!d_reduceComplex) { if (hipMalloc((void**) &d_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == hipErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!d_reduceFloat3) { if (hipMalloc((void**) &d_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == hipErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!h_reduceFloat) { if (hipHostMalloc((void**) &h_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == hipErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } if (!h_reduceComplex) { if (hipHostMalloc((void**) &h_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == hipErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } if (!h_reduceFloat3) { if (hipHostMalloc((void**) &h_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == hipErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } } void endBlas(void) { if (d_reduceFloat) { 
hipFree(d_reduceFloat); d_reduceFloat = 0; } if (d_reduceComplex) { hipFree(d_reduceComplex); d_reduceComplex = 0; } if (d_reduceFloat3) { hipFree(d_reduceFloat3); d_reduceFloat3 = 0; } if (h_reduceFloat) { hipHostFree(h_reduceFloat); h_reduceFloat = 0; } if (h_reduceComplex) { hipHostFree(h_reduceComplex); h_reduceComplex = 0; } if (h_reduceFloat3) { hipHostFree(h_reduceFloat3); h_reduceFloat3 = 0; } } void setBlasTuning(QudaTune tune) { blasTuning = tune; } void setBlasParam(int kernel, int prec, int threads, int blocks) { blas_threads[kernel][prec] = threads; blas_blocks[kernel][prec] = blocks; } } void setBlock(int kernel, int length, QudaPrecision precision) { int prec; switch(precision) { case QUDA_HALF_PRECISION: prec = 0; break; case QUDA_SINGLE_PRECISION: prec = 1; break; case QUDA_DOUBLE_PRECISION: prec = 2; break; } int blocks = min(blas_blocks[kernel][prec], max(length/blas_threads[kernel][prec], 1)); blasBlock.x = blas_threads[kernel][prec]; blasBlock.y = 1; blasBlock.z = 1; blasGrid.x = blocks; blasGrid.y = 1; blasGrid.z = 1; } #if (__COMPUTE_CAPABILITY__ >= 130) static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i) { int4 v = tex1Dfetch(t,i); return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); } #else static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i) { // do nothing return make_double2(0.0, 0.0); } #endif float2 __device__ read_Float2(float2 *x, int i) { return make_float2(x[i].x, x[i].y); } double2 __device__ read_Float2(double2 *x, int i) { return make_double2(x[i].x, x[i].y); } #if FERMI_NO_DBLE_TEX #define READ_DOUBLE2_TEXTURE(x, i) \ read_Float2(x, i) #else #define READ_DOUBLE2_TEXTURE(x, i) \ fetch_double2(x##TexDouble2, i) #endif #define READ_FLOAT2_TEXTURE(x, i) \ tex1Dfetch(x##TexSingle2, i) float2 __device__ make_Float2(float2 x) { return make_float2(x.x, x.y); } double2 __device__ make_Float2(double2 x) { return make_double2(x.x, x.y); } #define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float4 a##0 = tex1Dfetch(texHalf, i + 0*length); \ float4 a##1 = tex1Dfetch(texHalf, i + 1*length); \ float4 a##2 = tex1Dfetch(texHalf, i + 2*length); \ float4 a##3 = tex1Dfetch(texHalf, i + 3*length); \ float4 a##4 = tex1Dfetch(texHalf, i + 4*length); \ float4 a##5 = tex1Dfetch(texHalf, i + 5*length); \ a##0 *= a##c; \ a##1 *= a##c; \ a##2 *= a##c; \ a##3 *= a##c; \ a##4 *= a##c; \ a##5 *= a##c; #define RECONSTRUCT_HALF_SPINOR_ST(a, texHalf, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float2 a##0 = tex1Dfetch(texHalf, i + 0*length); \ float2 a##1 = tex1Dfetch(texHalf, i + 1*length); \ float2 a##2 = tex1Dfetch(texHalf, i + 2*length); \ (a##0) *= a##c; \ (a##1) *= a##c; \ (a##2) *= a##c; // Some musings on how to clean up the blas code using Boost /*#define BOOST_RECONSTRUCT_HALF_SPINOR(z, j, a, texHalf, length) \ float4 a##k tex1Dfetch(texHalf, i + j*length); \ a##k *= a##c; #define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \ BOOST_PP_REPEAT(6, BOOST_RECONSTRUCT_HALF_SPINOR, a, texHalf, length) \ */ #define READ_HALF_SPINOR_TEX(a, tex, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float4 a##0 = tex1Dfetch(tex, i + 0*length); \ float4 a##1 = tex1Dfetch(tex, i + 1*length); \ float4 a##2 = tex1Dfetch(tex, i + 2*length); \ float4 a##3 = tex1Dfetch(tex, i + 3*length); \ float4 a##4 = tex1Dfetch(tex, i + 4*length); \ float4 a##5 = tex1Dfetch(tex, i + 5*length); \ #define READ_HALF_SPINOR(a, tex, length) \ float4 a##0 = 
tex1Dfetch(tex, i + 0*length); \ float4 a##1 = tex1Dfetch(tex, i + 1*length); \ float4 a##2 = tex1Dfetch(tex, i + 2*length); \ float4 a##3 = tex1Dfetch(tex, i + 3*length); \ float4 a##4 = tex1Dfetch(tex, i + 4*length); \ float4 a##5 = tex1Dfetch(tex, i + 5*length); \ float a##c = a##N[i]; #define READ_HALF_SPINOR_ST(a, tex, length) \ float2 a##0 = tex1Dfetch(tex, i + 0*length); \ float2 a##1 = tex1Dfetch(tex, i + 1*length); \ float2 a##2 = tex1Dfetch(tex, i + 2*length); \ float a##c = a##N[i]; #define FAST_ABS_MAX(a, b) fmaxf(fabsf(a), fabsf(b)); #define FAST_MAX(a, b) fmaxf(a, b); __device__ float fast_abs_max(float4 a) { float c0 = FAST_ABS_MAX(a.x, a.y); float c1 = FAST_ABS_MAX(a.z, a.w); return FAST_MAX(c0, c1); } #define CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, n, a, length) { \ float c0 = fast_abs_max(a##0); \ float c1 = fast_abs_max(a##1); \ c0 = FAST_MAX(c0, c1); \ float c2 = fast_abs_max(a##2); \ float c3 = fast_abs_max(a##3); \ c1 = FAST_MAX(c2, c3); \ c0 = FAST_MAX(c0, c1); \ c2 = fast_abs_max(a##4); \ c3 = fast_abs_max(a##5); \ c1 = FAST_MAX(c2, c3); \ c0 = FAST_MAX(c0, c1); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \ (short)(C*(float)(a##0).z), (short)(C*(float)(a##0).w)); \ h[i+1*length] = make_short4((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y), \ (short)(C*(float)(a##1).z), (short)(C*(float)(a##1).w)); \ h[i+2*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \ (short)(C*(float)(a##2).z), (short)(C*(float)(a##2).w)); \ h[i+3*length] = make_short4((short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y), \ (short)(C*(float)(a##3).z), (short)(C*(float)(a##3).w)); \ h[i+4*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \ (short)(C*(float)(a##4).z), (short)(C*(float)(a##4).w)); \ h[i+5*length] = make_short4((short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y), \ (short)(C*(float)(a##5).z), (short)(C*(float)(a##5).w));} #define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ float c3 = fmaxf(fabsf((a##3).x), fabsf((a##3).y)); \ float c4 = fmaxf(fabsf((a##4).x), fabsf((a##4).y)); \ float c5 = fmaxf(fabsf((a##5).x), fabsf((a##5).y)); \ float c6 = fmaxf(fabsf((a##6).x), fabsf((a##6).y)); \ float c7 = fmaxf(fabsf((a##7).x), fabsf((a##7).y)); \ float c8 = fmaxf(fabsf((a##8).x), fabsf((a##8).y)); \ float c9 = fmaxf(fabsf((a##9).x), fabsf((a##9).y)); \ float c10 = fmaxf(fabsf((a##10).x), fabsf((a##10).y)); \ float c11 = fmaxf(fabsf((a##11).x), fabsf((a##11).y)); \ c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); c2 = fmaxf(c4, c5); c3 = fmaxf(c6, c7); \ c4 = fmaxf(c8, c9); c5 = fmaxf(c10, c11); c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); \ c2 = fmaxf(c4, c5); c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \ (short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+1*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \ (short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y)); \ h[i+2*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \ (short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y)); \ h[i+3*length] = make_short4((short)(C*(float)(a##6).x), (short)(C*(float)(a##6).y), \ 
(short)(C*(float)(a##7).x), (short)(C*(float)(a##7).y)); \ h[i+4*length] = make_short4((short)(C*(float)(a##8).x), (short)(C*(float)(a##8).y), \ (short)(C*(float)(a##9).x), (short)(C*(float)(a##9).y)); \ h[i+5*length] = make_short4((short)(C*(float)(a##10).x), (short)(C*(float)(a##10).y), \ (short)(C*(float)(a##11).x), (short)(C*(float)(a##11).y));} #define CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \ h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));} #define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \ h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));} #define SUM_FLOAT4(sum, a) \ float sum = fabs(a.x) + fabs(a.y) + fabs(a.z) + fabs(a.w); #define SUM_FLOAT2(sum, a) \ float sum = fabs(a.x) + fabs(a.y); #if (__COMPUTE_CAPABILITY__ < 200) #define REAL_DOT_FLOAT4(dot, a, b) \ float dot = a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; #else #define REAL_DOT_FLOAT4(dot, a, b) \ float dot = fmaf(a.x, b.x, 0.0f); \ dot = fmaf(a.y, b.y, dot); \ dot = fmaf(a.z, b.z, dot); \ dot = fmaf(a.w, b.w, dot) #endif #define REAL_DOT_FLOAT2(dot, a, b) \ float dot = a.x*b.x + a.y*b.y; #if (__COMPUTE_CAPABILITY__ < 200) #define IMAG_DOT_FLOAT4(dot, a, b) \ float dot = a.x*b.y - a.y*b.x + a.z*b.w - a.w*b.z; #else #define IMAG_DOT_FLOAT4(dot, a, b) \ float dot = fmaf(a.x, b.y, 0.0f); \ dot = fmaf(-a.y, b.x, dot); \ dot = fmaf(a.z, b.w, dot); \ dot = fmaf(-a.w, b.z, dot) #endif #define IMAG_DOT_FLOAT2(dot, a, b) \ float dot = a.x*b.y - a.y*b.x; #define AX_FLOAT4(a, X) \ X.x *= a; X.y *= a; X.z *= a; X.w *= a; #define AX_FLOAT2(a, X) \ X.x *= a; X.y *= a; #define XPY_FLOAT4(X, Y) \ Y.x += X.x; Y.y += X.y; Y.z += X.z; Y.w += X.w; #define XPY_FLOAT2(X, Y) \ Y.x += X.x; Y.y += X.y; #define XMY_FLOAT4(X, Y) \ Y.x = X.x - Y.x; Y.y = X.y - Y.y; Y.z = X.z - Y.z; Y.w = X.w - Y.w; #define XMY_FLOAT2(X, Y) \ Y.x = X.x - Y.x; Y.y = X.y - Y.y; #define MXPY_FLOAT4(X, Y) \ Y.x -= X.x; Y.y -= X.y; Y.z -= X.z; Y.w -= X.w; #define MXPY_FLOAT2(X, Y) \ Y.x -= X.x; Y.y -= X.y; #if (__COMPUTE_CAPABILITY__ < 200) #define AXPY_FLOAT4(a, X, Y) \ Y.x += a*X.x; Y.y += a*X.y; \ Y.z += a*X.z; Y.w += a*X.w; #else #define AXPY_FLOAT4(a, X, Y) \ Y.x = fmaf(a, X.x, Y.x); Y.y = fmaf(a, X.y, Y.y); \ Y.z = fmaf(a, X.z, Y.z); Y.w = fmaf(a, X.w, Y.w); #endif #define AXPY_FLOAT2(a, X, Y) \ Y.x += a*X.x; Y.y += a*X.y; #define AXPBY_FLOAT4(a, X, b, Y) \ Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \ Y.z = b*Y.z; Y.z += a*X.z; Y.w = b*Y.w; Y.w += a*X.w; #define AXPBY_FLOAT2(a, X, b, Y) \ Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \ #if (__COMPUTE_CAPABILITY__ < 200) #define XPAY_FLOAT4(X, a, Y) \ Y.x = X.x + 
a*Y.x; Y.y = X.y + a*Y.y; \ Y.z = X.z + a*Y.z; Y.w = X.w + a*Y.w; #else #define XPAY_FLOAT4(X, a, Y) \ Y.x = fmaf(a, Y.x, X.x); Y.y = fmaf(a, Y.y, X.y); \ Y.z = fmaf(a, Y.z, X.z); Y.w = fmaf(a, Y.w, X.w); #endif #define XPAY_FLOAT2(X, a, Y) \ Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y; #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_FLOAT4(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; \ Y.z += a.x*X.z; Y.z -= a.y*X.w; \ Y.w += a.y*X.z; Y.w += a.x*X.w; #else #define CAXPY_FLOAT4(a, X, Y) \ Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \ Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); \ Y.z = fmaf(a.x, X.z, Y.z); Y.z = fmaf(-a.y, X.w, Y.z); \ Y.w = fmaf(a.y, X.z, Y.w); Y.w = fmaf( a.x, X.w, Y.w); #endif // (__COMPUTE_CAPABILITY__ < 200) #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_FLOAT2(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; #else #define CAXPY_FLOAT2(a, X, Y) \ Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \ Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); #endif // (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_DOUBLE2(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; \ #define CMAXPY_FLOAT4(a, X, Y) \ Y.x -= a.x*X.x; Y.x += a.y*X.y; \ Y.y -= a.y*X.x; Y.y -= a.x*X.y; \ Y.z -= a.x*X.z; Y.z += a.y*X.w; \ Y.w -= a.y*X.z; Y.w -= a.x*X.w; #define CMAXPY_FLOAT2(a, X, Y) \ Y.x -= a.x*X.x; Y.x += a.y*X.y; \ Y.y -= a.y*X.x; Y.y -= a.x*X.y; #define CAXPBY_FLOAT4(a, X, b, Y) \ { float2 y; \ y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \ y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \ Y.x = y.x; Y.y = y.y; \ y.x = a.x*X.z; y.x -= a.y*X.w; y.x += b.x*Y.z; y.x -= b.y*Y.w; \ y.y = a.y*X.z; y.y += a.x*X.w; y.y += b.y*Y.z; y.y += b.x*Y.w; \ Y.z = y.x; Y.w = y.y;} #define CAXPBY_FLOAT2(a, X, b, Y) \ { float2 y; \ y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \ y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \ Y.x = y.x; Y.y = y.y;} #define CXPAYPBZ_FLOAT4(X, a, Y, b, Z) \ {float2 z; \ z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \ z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \ Z.x = z.x; Z.y = z.y; \ z.x = X.z + a.x*Y.z; z.x -= a.y*Y.w; z.x += b.x*Z.z; z.x -= b.y*Z.w; \ z.y = X.w + a.y*Y.z; z.y += a.x*Y.w; z.y += b.y*Z.z; z.y += b.x*Z.w; \ Z.z = z.x; Z.w = z.y;} #define CXPAYPBZ_FLOAT2(X, a, Y, b, Z) \ {float2 z; \ z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \ z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \ Z.x = z.x; Z.y = z.y;} #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \ Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \ Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; \ Z.z += a.x*X.z - a.y*X.w + b.x*Y.z - b.y*Y.w; \ Z.w += a.y*X.z + a.x*X.w + b.y*Y.z + b.x*Y.w; #else #define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \ Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \ Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); \ Z.z = fmaf(a.x, X.z, Z.z); Z.z = fmaf(-a.y, X.w, Z.z); Z.z = fmaf(b.x, Y.z, Z.z); Z.z = fmaf(-b.y, Y.w, Z.z); \ Z.w = fmaf(a.y, X.z, Z.w); Z.w = fmaf( a.x, X.w, Z.w); Z.w = fmaf(b.y, Y.z, Z.w); Z.w = fmaf( b.x, Y.w, Z.w); #endif // (__COMPUTE_CAPABILITY__ < 200) #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \ Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - 
b.y*Y.y; \ Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; #else #define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \ Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \ Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); #endif // (__COMPUTE_CAPABILITY__ < 200) // Double precision input spinor field texture<int4, 1> xTexDouble2; texture<int4, 1> yTexDouble2; texture<int4, 1> zTexDouble2; texture<int4, 1> wTexDouble2; texture<int4, 1> uTexDouble2; // Single precision input spinor field texture<float2, 1> xTexSingle2; texture<float2, 1> yTexSingle2; texture<float4, 1> xTexSingle4; // Half precision input spinor field texture<short4, 1, hipReadModeNormalizedFloat> texHalf1; texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt1; texture<float, 1, hipReadModeElementType> texNorm1; // Half precision input spinor field texture<short4, 1, hipReadModeNormalizedFloat> texHalf2; texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt2; texture<float, 1, hipReadModeElementType> texNorm2; // Half precision input spinor field texture<short4, 1, hipReadModeNormalizedFloat> texHalf3; texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt3; texture<float, 1, hipReadModeElementType> texNorm3; // Half precision input spinor field texture<short4, 1, hipReadModeNormalizedFloat> texHalf4; texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt4; texture<float, 1, hipReadModeElementType> texNorm4; // Half precision input spinor field texture<short4, 1, hipReadModeNormalizedFloat> texHalf5; texture<short2, 1, hipReadModeNormalizedFloat> texHalfSt5; texture<float, 1, hipReadModeElementType> texNorm5; void bindTexture(const cudaColorSpinorField *x, const cudaColorSpinorField *y=0, const cudaColorSpinorField *z=0, const cudaColorSpinorField *w=0, const cudaColorSpinorField *u=0) { QudaPrecision precision = x->Precision(); size_t spinor_bytes = x->Bytes() * precision; size_t norm_bytes = x->NormBytes(); if (precision == QUDA_DOUBLE_PRECISION) { hipBindTexture(0, xTexDouble2, x->V(), spinor_bytes); if (y) hipBindTexture(0, yTexDouble2, y->V(), spinor_bytes); if (z) hipBindTexture(0, zTexDouble2, z->V(), spinor_bytes); if (w) hipBindTexture(0, wTexDouble2, w->V(), spinor_bytes); if (u) hipBindTexture(0, uTexDouble2, u->V(), spinor_bytes); } else if (precision == QUDA_SINGLE_PRECISION) { if (x->Nspin() == 4) { hipBindTexture(0, xTexSingle4, x->V(), spinor_bytes); } else if (x->Nspin() == 1) { hipBindTexture(0, xTexSingle2, x->V(), spinor_bytes); if (y) hipBindTexture(0, yTexSingle2, y->V(), spinor_bytes); } else { errorQuda("Number of spins undefined"); } } else if (precision == QUDA_HALF_PRECISION) { if (x->Nspin() == 4){ //wilson hipBindTexture(0, texHalf1, x->V(), spinor_bytes); hipBindTexture(0, texNorm1, x->Norm(), norm_bytes); if (y) { hipBindTexture(0, texHalf2, y->V(), spinor_bytes); hipBindTexture(0, texNorm2, y->Norm(), norm_bytes); } if (z) { hipBindTexture(0, texHalf3, z->V(), spinor_bytes); hipBindTexture(0, texNorm3, z->Norm(), norm_bytes); } if (w) { hipBindTexture(0, texHalf4, w->V(), spinor_bytes); hipBindTexture(0, texNorm4, w->Norm(), norm_bytes); } if (u) { hipBindTexture(0, texHalf5, u->V(), spinor_bytes); hipBindTexture(0, texNorm5, u->Norm(), norm_bytes); } } else if (x->Nspin() == 1){ //staggered hipBindTexture(0, texHalfSt1, x->V(), spinor_bytes); hipBindTexture(0, texNorm1, x->Norm(), norm_bytes); if (y) { hipBindTexture(0, texHalfSt2, y->V(), spinor_bytes); hipBindTexture(0, 
texNorm2, y->Norm(), norm_bytes); } if (z) { hipBindTexture(0, texHalfSt3, z->V(), spinor_bytes); hipBindTexture(0, texNorm3, z->Norm(), norm_bytes); } if (w) { hipBindTexture(0, texHalfSt4, w->V(), spinor_bytes); hipBindTexture(0, texNorm4, w->Norm(), norm_bytes); } if (u) { hipBindTexture(0, texHalfSt5, u->V(), spinor_bytes); hipBindTexture(0, texNorm5, u->Norm(), norm_bytes); } } else { errorQuda("Number of spins undefined"); } } else { errorQuda("Precision undefined"); } } #define checkSpinor(a, b) \ { \ if (a.Precision() != b.Precision()) \ errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } // For kernels with precision conversion built in #define checkSpinorLength(a, b) \ { \ if (a.Length() != b.Length()) { \ errorQuda("engths do not match: %d %d", a.Length(), b.Length()); \ } __global__ void convertDSKernel(double2 *dst, float4 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<6; k++) { dst[2*k*length+i].x = src[k*length+i].x; dst[2*k*length+i].y = src[k*length+i].y; dst[(2*k+1)*length+i].x = src[k*length+i].z; dst[(2*k+1)*length+i].y = src[k*length+i].w; } i += gridSize; } } __global__ void convertDSKernel(double2 *dst, float2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<3; k++) { dst[k*length+i].x = src[k*length+i].x; dst[k*length+i].y = src[k*length+i].y; } i += gridSize; } } __global__ void convertSDKernel(float4 *dst, double2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<6; k++) { dst[k*length+i].x = src[2*k*length+i].x; dst[k*length+i].y = src[2*k*length+i].y; dst[k*length+i].z = src[(2*k+1)*length+i].x; dst[k*length+i].w = src[(2*k+1)*length+i].y; } i += gridSize; } } __global__ void convertSDKernel(float2 *dst, double2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<3; k++) { dst[k*length+i].x = src[k*length+i].x; dst[k*length+i].y = src[k*length+i].y; } i += gridSize; } } __global__ void convertHSKernel(short4 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { float4 F0 = tex1Dfetch(xTexSingle4, i + 0*length); float4 F1 = tex1Dfetch(xTexSingle4, i + 1*length); float4 F2 = tex1Dfetch(xTexSingle4, i + 2*length); float4 F3 = tex1Dfetch(xTexSingle4, i + 3*length); float4 F4 = tex1Dfetch(xTexSingle4, i + 4*length); float4 F5 = tex1Dfetch(xTexSingle4, i + 5*length); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, norm, F, length); i += gridSize; } } __global__ void convertHSKernel(short2 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { float2 F0 = tex1Dfetch(xTexSingle2, i + 0*length); float2 F1 = tex1Dfetch(xTexSingle2, i + 1*length); float2 F2 = tex1Dfetch(xTexSingle2, i + 2*length); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, norm, F, length); i += gridSize; } } __global__ void convertSHKernel(float4 
*res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i<real_length) { RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length); res[0*length+i] = I0; res[1*length+i] = I1; res[2*length+i] = I2; res[3*length+i] = I3; res[4*length+i] = I4; res[5*length+i] = I5; i += gridSize; } } __global__ void convertSHKernel(float2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i<real_length) { RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length); res[0*length+i] = I0; res[1*length+i] = I1; res[2*length+i] = I2; i += gridSize; } } __global__ void convertHDKernel(short4 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { double2 F0 = fetch_double2(xTexDouble2, i+0*length); double2 F1 = fetch_double2(xTexDouble2, i+1*length); double2 F2 = fetch_double2(xTexDouble2, i+2*length); double2 F3 = fetch_double2(xTexDouble2, i+3*length); double2 F4 = fetch_double2(xTexDouble2, i+4*length); double2 F5 = fetch_double2(xTexDouble2, i+5*length); double2 F6 = fetch_double2(xTexDouble2, i+6*length); double2 F7 = fetch_double2(xTexDouble2, i+7*length); double2 F8 = fetch_double2(xTexDouble2, i+8*length); double2 F9 = fetch_double2(xTexDouble2, i+9*length); double2 F10 = fetch_double2(xTexDouble2, i+10*length); double2 F11 = fetch_double2(xTexDouble2, i+11*length); CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, norm, F, length); i += gridSize; } } __global__ void convertHDKernel(short2 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { double2 F0 = fetch_double2(xTexDouble2, i+0*length); double2 F1 = fetch_double2(xTexDouble2, i+1*length); double2 F2 = fetch_double2(xTexDouble2, i+2*length); CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, norm, F, length); i += gridSize; } } __global__ void convertDHKernel(double2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length); res[0*length+i] = make_double2(I0.x, I0.y); res[1*length+i] = make_double2(I0.z, I0.w); res[2*length+i] = make_double2(I1.x, I1.y); res[3*length+i] = make_double2(I1.z, I1.w); res[4*length+i] = make_double2(I2.x, I2.y); res[5*length+i] = make_double2(I2.z, I2.w); res[6*length+i] = make_double2(I3.x, I3.y); res[7*length+i] = make_double2(I3.z, I3.w); res[8*length+i] = make_double2(I4.x, I4.y); res[9*length+i] = make_double2(I4.z, I4.w); res[10*length+i] = make_double2(I5.x, I5.y); res[11*length+i] = make_double2(I5.z, I5.w); i += gridSize; } } __global__ void convertDHKernelSt(double2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length); res[0*length+i] = make_double2(I0.x, I0.y); res[1*length+i] = make_double2(I1.x, I1.y); res[2*length+i] = make_double2(I2.x, I2.y); i += gridSize; } } void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) { if (&src == &dst) return; // aliasing fields if (src.Nspin() != 1 && src.Nspin() != 4){ errorQuda("nSpin(%d) not supported in function %s, line %d\n", src.Nspin(), __FUNCTION__, __LINE__); } if 
((dst.Precision() == QUDA_HALF_PRECISION || src.Precision() == QUDA_HALF_PRECISION) && (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET)) { copyCuda(dst.Even(), src.Even()); copyCuda(dst.Odd(), src.Odd()); return; } // For a given dst precision, there are two non-trivial possibilities for the // src precision. The higher one corresponds to kernel index 0 (in the table // of block and grid dimensions), while the lower one corresponds to index 1. int id; if (src.Precision() == QUDA_DOUBLE_PRECISION || dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { id = 0; } else { id = 1; } setBlock(id, dst.Stride(), dst.Precision()); quda::blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision()); if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertDSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), (float4*)src.V(), src.Stride()); }else{ //src.Nspin() == 1 hipLaunchKernelGGL(( convertDSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), (float2*)src.V(), src.Stride()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) { if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertSDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float4*)dst.V(), (double2*)src.V(), src.Stride()); }else{ //src.Nspin() ==1 hipLaunchKernelGGL(( convertSDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)dst.V(), (double2*)src.V(), src.Stride()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) { quda::blas_bytes += src.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertSHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float4*)dst.V(), src.Stride(), src.Volume()); hipUnbindTexture(texHalf1); hipUnbindTexture(texNorm1); }else{ //nSpin== 1; hipLaunchKernelGGL(( convertSHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)dst.V(), src.Stride(), src.Volume()); hipUnbindTexture(texHalfSt1); hipUnbindTexture(texNorm1); } } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { quda::blas_bytes += dst.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertHSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); hipUnbindTexture(xTexSingle4); }else{ //nSpin == 1 hipLaunchKernelGGL(( convertHSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); hipUnbindTexture(xTexSingle2); } } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) { quda::blas_bytes += src.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertDHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), src.Stride(), src.Volume()); hipUnbindTexture(texHalf1); hipUnbindTexture(texNorm1); }else{//nSpinr == 1 hipLaunchKernelGGL(( convertDHKernelSt), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)dst.V(), src.Stride(), src.Volume()); hipUnbindTexture(texHalfSt1); hipUnbindTexture(texNorm1); } } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) { quda::blas_bytes += dst.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ hipLaunchKernelGGL(( convertHDKernel), dim3(blasGrid), 
dim3(blasBlock), 0, 0, (short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); }else{ //nSpinr == 1 hipLaunchKernelGGL(( convertHDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); } hipUnbindTexture(xTexDouble2); } else { hipMemcpy(dst.V(), src.V(), dst.Bytes(), hipMemcpyDeviceToDevice); if (dst.Precision() == QUDA_HALF_PRECISION) { hipMemcpy(dst.Norm(), src.Norm(), dst.Bytes()/(dst.Ncolor()*dst.Nspin()), hipMemcpyDeviceToDevice); quda::blas_bytes += 2*dst.RealLength()*sizeof(float); } } if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpbyKernel(Float a, Float2 *x, Float b, Float2 *y, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { y[i] = a*x[i] + b*y[i]; i += gridSize; } } __global__ void axpbyHKernel(float a, float b, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPBY_FLOAT4(a, x0, b, y0); AXPBY_FLOAT4(a, x1, b, y1); AXPBY_FLOAT4(a, x2, b, y2); AXPBY_FLOAT4(a, x3, b, y3); AXPBY_FLOAT4(a, x4, b, y4); AXPBY_FLOAT4(a, x5, b, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void axpbyHKernel(float a, float b, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AXPBY_FLOAT2(a, x0, b, y0); AXPBY_FLOAT2(a, x1, b, y1); AXPBY_FLOAT2(a, x2, b, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = a*x[i] + b*y[i] void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) { setBlock(2, x.Length(), x.Precision()); checkSpinor(x, y); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpbyCuda(a, x.Even(), b, y.Even()); axpbyCuda(a, x.Odd(), b, y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( axpbyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), b, (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( axpbyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float)b, (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( axpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1) {//staggered hipLaunchKernelGGL(( axpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 3*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float> __global__ void xpyKernel(Float *x, Float *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] += x[i]; i += 
gridSize; } } __global__ void xpyHKernel(short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); XPY_FLOAT4(x0, y0); XPY_FLOAT4(x1, y1); XPY_FLOAT4(x2, y2); XPY_FLOAT4(x3, y3); XPY_FLOAT4(x4, y4); XPY_FLOAT4(x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void xpyHKernel(short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); XPY_FLOAT2(x0, y0); XPY_FLOAT2(x1, y1); XPY_FLOAT2(x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = x[i] + y[i] void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(3, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { xpyCuda(x.Even(), y.Even()); xpyCuda(x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( xpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( xpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( xpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( xpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyKernel(Float a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] += a*x[i]; i += gridSize; } } __global__ void axpyHKernel(float a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPY_FLOAT4(a, x0, y0); AXPY_FLOAT4(a, x1, y1); AXPY_FLOAT4(a, x2, y2); AXPY_FLOAT4(a, x3, y3); AXPY_FLOAT4(a, x4, y4); AXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void axpyHKernel(float a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AXPY_FLOAT2(a, x0, y0); AXPY_FLOAT2(a, x1, y1); AXPY_FLOAT2(a, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = a*x[i] + y[i] void axpyCuda(const 
double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(4, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpyCuda(a, x.Even(), y.Even()); axpyCuda(a, x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( axpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( axpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( axpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( axpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 2*x.RealLength(); if (!blasTuning) checkCudaError(); // blas_quda may require new blas_param from blas_test } template <typename Float, typename Float2> __global__ void xpayKernel(const Float2 *x, Float a, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] = x[i] + a*y[i]; i += gridSize; } } __global__ void xpayHKernel(float a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); XPAY_FLOAT4(x0, a, y0); XPAY_FLOAT4(x1, a, y1); XPAY_FLOAT4(x2, a, y2); XPAY_FLOAT4(x3, a, y3); XPAY_FLOAT4(x4, a, y4); XPAY_FLOAT4(x5, a, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void xpayHKernel(float a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); XPAY_FLOAT2(x0, a, y0); XPAY_FLOAT2(x1, a, y1); XPAY_FLOAT2(x2, a, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = x[i] + a*y[i] void xpayCuda(const cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(5, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { xpayCuda(x.Even(), a, y.Even()); xpayCuda(x.Odd(), a, y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( xpayKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), a, (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( xpayKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float)a, (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( xpayHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() ==1){ //staggered hipLaunchKernelGGL(( xpayHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, 
(float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 2*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float> __global__ void mxpyKernel(Float *x, Float *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] -= x[i]; i += gridSize; } } __global__ void mxpyHKernel(short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); MXPY_FLOAT4(x0, y0); MXPY_FLOAT4(x1, y1); MXPY_FLOAT4(x2, y2); MXPY_FLOAT4(x3, y3); MXPY_FLOAT4(x4, y4); MXPY_FLOAT4(x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void mxpyHKernel(short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); MXPY_FLOAT2(x0, y0); MXPY_FLOAT2(x1, y1); MXPY_FLOAT2(x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] -= x[i] (minus x plus y) void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(6, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { mxpyCuda(x.Even(), y.Even()); mxpyCuda(x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( mxpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( mxpyKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( mxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1) { //staggered hipLaunchKernelGGL(( mxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axKernel(Float a, Float2 *x, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { x[i] *= a; i += gridSize; } } __global__ void axHKernel(float a, short4 *xH, float *xN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2); AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axHKernel(float a, short2 *xH, float *xN, int stride, int length) { unsigned 
int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operation x[i] = a*x[i] void axCuda(const double &a, cudaColorSpinorField &x) { setBlock(7, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axCuda(a, x.Even()); axCuda(a, x.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { hipLaunchKernelGGL(( axKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double*)x.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( axKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), x.Length()/2); } else { bindTexture(&x); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( axHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short4*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume()); }else if (x.Nspin() ==1){ //staggered hipLaunchKernelGGL(( axHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (short2*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 2*x.Volume()*sizeof(float); } quda::blas_bytes += 2*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpyDKernel(Float2 a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z = READ_DOUBLE2_TEXTURE(x, i); y[i].x += a.x*Z.x - a.y*Z.y; y[i].y += a.y*Z.x + a.x*Z.y; i += gridSize; } } template <typename Float2> __global__ void caxpySKernel(Float2 a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z = read_Float2(x, i); y[i].x += a.x*Z.x - a.y*Z.y; y[i].y += a.y*Z.x + a.x*Z.y; i += gridSize; } } __global__ void caxpyHKernel(float2 a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); CAXPY_FLOAT4(a, x0, y0); CAXPY_FLOAT4(a, x1, y1); CAXPY_FLOAT4(a, x2, y2); CAXPY_FLOAT4(a, x3, y3); CAXPY_FLOAT4(a, x4, y4); CAXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void caxpyHKernel(float2 a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); CAXPY_FLOAT2(a, x0, y0); CAXPY_FLOAT2(a, x1, y1); CAXPY_FLOAT2(a, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] += a*x[i] void caxpyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); int length = x.Length()/2; setBlock(8, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpyCuda(a, x.Even(), y.Even()); caxpyCuda(a, x.Odd(), y.Odd()); return; } quda::blas_bytes += 3*x.RealLength()*x.Precision(); 
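  // Transfer and flop accounting for caxpy: the kernel reads x and y and writes y
  // back (three spinor transfers), and a complex multiply-add costs four real
  // multiplies plus four real additions per complex element, i.e. 4 operations per
  // real degree of freedom -- hence the 4*RealLength() below.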
  quda::blas_flops += 4*x.RealLength();

  if (x.Precision() == QUDA_DOUBLE_PRECISION) {
    bindTexture(&x, &y);
    double2 a2 = make_double2(real(a), imag(a));
    hipLaunchKernelGGL(( caxpyDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), (double2*)y.V(), length);
  } else if (x.Precision() == QUDA_SINGLE_PRECISION) {
    float2 a2 = make_float2(real(a), imag(a));
    hipLaunchKernelGGL(( caxpySKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), (float2*)y.V(), length);
  } else {
    bindTexture(&x, &y);
    float2 a2 = make_float2(real(a), imag(a));
    if (x.Nspin() == 4){ //wilson
      hipLaunchKernelGGL(( caxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
    } else if (x.Nspin() == 1){ //staggered
      hipLaunchKernelGGL(( caxpyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume());
    } else {
      errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin());
    }
    quda::blas_bytes += 3*x.Volume()*sizeof(float);
  }
  if (!blasTuning) checkCudaError();
}

template <typename Float2>
__global__ void caxpbyDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;
  while (i < len) {
    Float2 Z1 = READ_DOUBLE2_TEXTURE(x, i);
    Float2 Z2 = READ_DOUBLE2_TEXTURE(y, i);
    y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
    y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
    i += gridSize;
  }
}

template <typename Float2>
__global__ void caxpbySKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) {
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;
  while (i < len) {
    Float2 Z1 = read_Float2(x, i);
    Float2 Z2 = read_Float2(y, i);
    y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y;
    y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y;
    i += gridSize;
  }
}

__global__ void caxpbyHKernel(float2 a, float2 b, short4 *yH, float *yN, int stride, int length) {
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;
  while (i < length) {
    RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);
    RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);
    CAXPBY_FLOAT4(a, x0, b, y0);
    CAXPBY_FLOAT4(a, x1, b, y1);
    CAXPBY_FLOAT4(a, x2, b, y2);
    CAXPBY_FLOAT4(a, x3, b, y3);
    CAXPBY_FLOAT4(a, x4, b, y4);
    CAXPBY_FLOAT4(a, x5, b, y5);
    CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
    i += gridSize;
  }
}

__global__ void caxpbyHKernel(float2 a, float2 b, short2 *yH, float *yN, int stride, int length) {
  unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x;
  unsigned int gridSize = gridDim.x*blockDim.x;
  while (i < length) {
    RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);
    RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);
    CAXPBY_FLOAT2(a, x0, b, y0);
    CAXPBY_FLOAT2(a, x1, b, y1);
    CAXPBY_FLOAT2(a, x2, b, y2);
    CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
    i += gridSize;
  }
}

// performs the operation y[i] = a*x[i] + b*y[i]
void caxpbyCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y) {
  checkSpinor(x,y);
  int length = x.Length()/2;
  setBlock(9, length, x.Precision());
  if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) {
    caxpbyCuda(a, x.Even(), b, y.Even());
    caxpbyCuda(a, x.Odd(), b, y.Odd());
    return;
  }
  quda::blas_bytes += 3*x.RealLength()*x.Precision();
  quda::blas_flops += 7*x.RealLength();
  if (x.Precision() == QUDA_DOUBLE_PRECISION) {
    double2 a2 = make_double2(real(a),
imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y); hipLaunchKernelGGL(( caxpbyDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); hipLaunchKernelGGL(( caxpbySKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), length); } else { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( caxpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( caxpbyHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void cxpaypbzDKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 T1 = READ_DOUBLE2_TEXTURE(x, i); Float2 T2 = READ_DOUBLE2_TEXTURE(y, i); Float2 T3 = read_Float2(z, i); T1.x += a.x*T2.x - a.y*T2.y; T1.y += a.y*T2.x + a.x*T2.y; T1.x += b.x*T3.x - b.y*T3.y; T1.y += b.y*T3.x + b.x*T3.y; z[i] = make_Float2(T1); i += gridSize; } } template <typename Float2> __global__ void cxpaypbzSKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 T1 = read_Float2(x, i); Float2 T2 = read_Float2(y, i); Float2 T3 = read_Float2(z, i); T1.x += a.x*T2.x - a.y*T2.y; T1.y += a.y*T2.x + a.x*T2.y; T1.x += b.x*T3.x - b.y*T3.y; T1.y += b.y*T3.x + b.x*T3.y; z[i] = make_Float2(T1); i += gridSize; } } __global__ void cxpaypbzHKernel(float2 a, float2 b, short4 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CXPAYPBZ_FLOAT4(x0, a, y0, b, z0); CXPAYPBZ_FLOAT4(x1, a, y1, b, z1); CXPAYPBZ_FLOAT4(x2, a, y2, b, z2); CXPAYPBZ_FLOAT4(x3, a, y3, b, z3); CXPAYPBZ_FLOAT4(x4, a, y4, b, z4); CXPAYPBZ_FLOAT4(x5, a, y5, b, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); i += gridSize; } } __global__ void cxpaypbzHKernel(float2 a, float2 b, short2 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CXPAYPBZ_FLOAT2(x0, a, y0, b, z0); CXPAYPBZ_FLOAT2(x1, a, y1, b, z1); CXPAYPBZ_FLOAT2(x2, a, y2, b, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); i += gridSize; } } // performs the operation z[i] = x[i] + a*y[i] + b*z[i] void cxpaypbzCuda(cudaColorSpinorField &x, const quda::Complex &a, cudaColorSpinorField &y, const quda::Complex &b, 
cudaColorSpinorField &z) { checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; setBlock(10, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { cxpaypbzCuda(x.Even(), a, y.Even(), b, z.Even()); cxpaypbzCuda(x.Odd(), a, y.Odd(), b, z.Odd()); return; } quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y); hipLaunchKernelGGL(( cxpaypbzDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double2*)x.V(), a2, (double2*)y.V(), b2, (double2*)z.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); hipLaunchKernelGGL(( cxpaypbzSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float2*)x.V(), a2, (float2*)y.V(), b2, (float2*)z.V(), length); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() ==4 ){//wilson hipLaunchKernelGGL(( cxpaypbzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() ==1 ){//staggered hipLaunchKernelGGL(( cxpaypbzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyBzpcxDKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = READ_DOUBLE2_TEXTURE(x, i); Float2 z_i = READ_DOUBLE2_TEXTURE(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = b*z_i.x + c*x_i.x; x[i].y = b*z_i.y + c*x_i.y; i += gridSize; } } template <typename Float, typename Float2> __global__ void axpyBzpcxSKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = read_Float2(x, i); Float2 z_i = read_Float2(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = b*z_i.x + c*x_i.x; x[i].y = b*z_i.y + c*x_i.y; i += gridSize; } } __global__ void axpyBzpcxHKernel(float a, float b, float c, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); AXPY_FLOAT4(a, x0, y0); AXPBY_FLOAT4(b, z0, c, x0); AXPY_FLOAT4(a, x1, y1); AXPBY_FLOAT4(b, z1, c, x1); AXPY_FLOAT4(a, x2, y2); AXPBY_FLOAT4(b, z2, c, x2); AXPY_FLOAT4(a, x3, y3); AXPBY_FLOAT4(b, z3, c, x3); AXPY_FLOAT4(a, x4, y4); AXPBY_FLOAT4(b, z4, c, x4); AXPY_FLOAT4(a, x5, y5); AXPBY_FLOAT4(b, z5, c, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axpyBzpcxHKernel(float a, float b, float c, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned 
int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); AXPY_FLOAT2(a, x0, y0); AXPBY_FLOAT2(b, z0, c, x0); AXPY_FLOAT2(a, x1, y1); AXPBY_FLOAT2(b, z1, c, x1); AXPY_FLOAT2(a, x2, y2); AXPBY_FLOAT2(b, z2, c, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operations: {y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]} void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b, cudaColorSpinorField& z, const double &c) { checkSpinor(x,y); checkSpinor(x,z); setBlock(11, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET){ axpyBzpcxCuda(a, x.Even(), y.Even(), b, z.Even(), c); axpyBzpcxCuda(a, x.Odd(), y.Odd(), b, z.Odd(), c); return ; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, 0, &z); hipLaunchKernelGGL(( axpyBzpcxDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double2*)x.V(), (double2*)y.V(), b, (double2*)z.V(), c, x.Length()/2); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( axpyBzpcxSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float2*)y.V(), (float)b, (float2*)z.V(), (float)c, x.Length()/2); } else { bindTexture(&x, &y, &z); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( axpyBzpcxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (float)c, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( axpyBzpcxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (float)c, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 5*x.Volume()*sizeof(float); } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 10*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyZpbxDKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = READ_DOUBLE2_TEXTURE(x, i); Float2 z_i = READ_DOUBLE2_TEXTURE(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = z_i.x + b*x_i.x; x[i].y = z_i.y + b*x_i.y; i += gridSize; } } template <typename Float, typename Float2> __global__ void axpyZpbxSKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = read_Float2(x, i); Float2 z_i = read_Float2(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = z_i.x + b*x_i.x; x[i].y = z_i.y + b*x_i.y; i += gridSize; } } __global__ void axpyZpbxHKernel(float a, float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPY_FLOAT4(a, x0, y0); AXPY_FLOAT4(a, x1, y1); AXPY_FLOAT4(a, x2, y2); AXPY_FLOAT4(a, x3, y3); AXPY_FLOAT4(a, x4, y4); 
AXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); XPAY_FLOAT4(z0, b, x0); XPAY_FLOAT4(z1, b, x1); XPAY_FLOAT4(z2, b, x2); XPAY_FLOAT4(z3, b, x3); XPAY_FLOAT4(z4, b, x4); XPAY_FLOAT4(z5, b, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axpyZpbxHKernel(float a, float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); AXPY_FLOAT2(a, x0, y0); XPAY_FLOAT2(z0, b, x0); AXPY_FLOAT2(a, x1, y1); XPAY_FLOAT2(z1, b, x1); AXPY_FLOAT2(a, x2, y2); XPAY_FLOAT2(z2, b, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operations: {y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]} void axpyZpbxCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z, const double &b) { checkSpinor(x,y); checkSpinor(x,z); setBlock(12, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpyZpbxCuda(a, x.Even(), y.Even(), z.Even(), b); axpyZpbxCuda(a, x.Odd(), y.Odd(), z.Odd(), b); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, 0, &z); hipLaunchKernelGGL(( axpyZpbxDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), b, x.Length()/2); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { hipLaunchKernelGGL(( axpyZpbxSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), (float)b, x.Length()/2); } else { bindTexture(&x, &y, &z); int spinor_bytes = x.Length()*sizeof(short); if (x.Nspin() ==4){ //wilson hipLaunchKernelGGL(( axpyZpbxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( axpyZpbxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, (float)b, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 5*x.Volume()*sizeof(float); } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpbypzYmbwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = READ_DOUBLE2_TEXTURE(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = READ_DOUBLE2_TEXTURE(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); Float2 W = read_Float2(w, i); Y.x -= b.x*W.x - b.y*W.y; Y.y -= b.y*W.x + b.x*W.y; y[i] = make_Float2(Y); i += gridSize; } } template <typename Float2> __global__ void caxpbypzYmbwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; 
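  // Grid-stride loop: each thread starts at its global index and advances by the
  // total number of launched threads, so vectors of any length are covered
  // independently of the chosen block/grid dimensions.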
while (i < len) { Float2 X = read_Float2(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = read_Float2(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); Float2 W = read_Float2(w, i); Y.x -= b.x*W.x - b.y*W.y; Y.y -= b.y*W.x + b.x*W.y; y[i] = make_Float2(Y); i += gridSize; } } __global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); READ_HALF_SPINOR(w, texHalf4, stride); float2 b2 = -wc*b; CAXPY_FLOAT4(b2, w0, y0); CAXPY_FLOAT4(b2, w1, y1); CAXPY_FLOAT4(b2, w2, y2); CAXPY_FLOAT4(b2, w3, y3); CAXPY_FLOAT4(b2, w4, y4); CAXPY_FLOAT4(b2, w5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); READ_HALF_SPINOR_ST(w, texHalfSt4, stride); float2 b2 = -wc*b; CAXPY_FLOAT2(b2, w0, y0); CAXPY_FLOAT2(b2, w1, y1); CAXPY_FLOAT2(b2, w2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i] void caxpbypzYmbwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) { checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); int length = x.Length()/2; setBlock(13, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypzYmbwCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even()); caxpbypzYmbwCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z); hipLaunchKernelGGL(( caxpbypzYmbwDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), (double2*)w.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); hipLaunchKernelGGL(( caxpbypzYmbwSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), (float2*)w.V(), length); } else { quda::blas_bytes += 6*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); bindTexture(&x, &y, &z, &w); if (x.Nspin() == 4){ 
//wilson hipLaunchKernelGGL(( caxpbypzYmbwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( caxpbypzYmbwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 6*x.RealLength()*x.Precision(); quda::blas_flops += 12*x.RealLength(); if (!blasTuning) checkCudaError(); } #if (__COMPUTE_CAPABILITY__ < 130) // Computes c = a + b in "double single" precision. __device__ void dsadd(volatile QudaSumFloat &c0, volatile QudaSumFloat &c1, const volatile QudaSumFloat &a0, const volatile QudaSumFloat &a1, const float b0, const float b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0 + b0; QudaSumFloat e = t1 - a0; QudaSumFloat t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1; // The result is t1 + t2, after normalization. c0 = e = t1 + t2; c1 = t2 - (e - t1); } // Computes c = a + b in "double single" precision (complex version) __device__ void zcadd(volatile QudaSumComplex &c0, volatile QudaSumComplex &c1, const volatile QudaSumComplex &a0, const volatile QudaSumComplex &a1, const volatile QudaSumComplex &b0, const volatile QudaSumComplex &b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0.x + b0.x; QudaSumFloat e = t1 - a0.x; QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x; // The result is t1 + t2, after normalization. c0.x = e = t1 + t2; c1.x = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.y + b0.y; e = t1 - a0.y; t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y; // The result is t1 + t2, after normalization. c0.y = e = t1 + t2; c1.y = t2 - (e - t1); } // Computes c = a + b in "double single" precision (float3 version) __device__ void dsadd3(volatile QudaSumFloat3 &c0, volatile QudaSumFloat3 &c1, const volatile QudaSumFloat3 &a0, const volatile QudaSumFloat3 &a1, const volatile QudaSumFloat3 &b0, const volatile QudaSumFloat3 &b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0.x + b0.x; QudaSumFloat e = t1 - a0.x; QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x; // The result is t1 + t2, after normalization. c0.x = e = t1 + t2; c1.x = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.y + b0.y; e = t1 - a0.y; t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y; // The result is t1 + t2, after normalization. c0.y = e = t1 + t2; c1.y = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.z + b0.z; e = t1 - a0.z; t2 = ((b0.z - e) + (a0.z - (t1 - e))) + a1.z + b1.z; // The result is t1 + t2, after normalization. 
c0.z = e = t1 + t2; c1.z = t2 - (e - t1); } #endif // // double normCuda(float *a, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normD##suffix #define REDUCE_TYPES Float *a #define REDUCE_PARAMS a #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i]*a[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normS##suffix #define REDUCE_TYPES Float *a #define REDUCE_PARAMS a #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION // // double normHCuda(char *, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normH##suffix #define REDUCE_TYPES Float *aN, int stride // dummy type #define REDUCE_PARAMS aN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ REAL_DOT_FLOAT4(norm0, a0, a0); \ REAL_DOT_FLOAT4(norm1, a1, a1); \ REAL_DOT_FLOAT4(norm2, a2, a2); \ REAL_DOT_FLOAT4(norm3, a3, a3); \ REAL_DOT_FLOAT4(norm4, a4, a4); \ REAL_DOT_FLOAT4(norm5, a5, a5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_OPERATION(i) (ac*ac*norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normHSt##suffix #define REDUCE_TYPES Float *aN, int stride // dummy type #define REDUCE_PARAMS aN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ REAL_DOT_FLOAT2(norm0, a0, a0); \ REAL_DOT_FLOAT2(norm1, a1, a1); \ REAL_DOT_FLOAT2(norm2, a2, a2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_OPERATION(i) (ac*ac*norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double normCuda(const cudaColorSpinorField &a) { if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) return normCuda(a.Even()) + normCuda(a.Odd()); const int id = 14; quda::blas_flops += 2*a.RealLength(); quda::blas_bytes += a.RealLength()*a.Precision(); if (a.Precision() == QUDA_DOUBLE_PRECISION) { return normDCuda((double*)a.V(), a.Length(), id, a.Precision()); } else if (a.Precision() == QUDA_SINGLE_PRECISION) { return normSCuda((float2*)a.V(), a.Length()/2, id, a.Precision()); } else { bindTexture(&a); quda::blas_bytes += (a.RealLength()*a.Precision()) / (a.Ncolor() * a.Nspin()); if (a.Nspin() == 4){ //wilson return normHCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else if (a.Nspin() == 1) { //staggered return normHStCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin()); return 0; } } } // // double reDotProductFCuda(float *a, float *b, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductD##suffix #define REDUCE_TYPES Float *a, Float *b #define REDUCE_PARAMS a, b #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i]*b[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION 
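//
// These reduction routines are generated with an X-macro pattern: reduce_core.h
// (and reduce_complex_core.h further down) is re-included once per routine, with
// REDUCE_FUNC_NAME fixing the generated function names, REDUCE_TYPES/REDUCE_PARAMS
// the argument list, REDUCE_AUXILIARY the per-site work, and REDUCE_OPERATION the
// summand.  Conceptually (an illustrative sketch only -- the real header also does
// the shared-memory tree reduction, and on pre-sm_13 devices accumulates in
// "double single" arithmetic via the dsadd routines above) each generated kernel does:
//
//   QudaSumFloat sum = 0;
//   for (unsigned int i = tid; i < n; i += gridSize) {
//     REDUCE_AUXILIARY(i);
//     sum += REDUCE_OPERATION(i);
//   }
//   // block-level reduction in shared memory, one partial sum per block,
//   // followed by a final accumulation on the host
//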
template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductS##suffix #define REDUCE_TYPES Float *a, Float *b #define REDUCE_PARAMS a, b #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION // // double reDotProductHCuda(float *a, float *b, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductH##suffix #define REDUCE_TYPES Float *aN, Float *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ READ_HALF_SPINOR(b, texHalf2, stride); \ REAL_DOT_FLOAT4(rdot0, a0, b0); \ REAL_DOT_FLOAT4(rdot1, a1, b1); \ REAL_DOT_FLOAT4(rdot2, a2, b2); \ REAL_DOT_FLOAT4(rdot3, a3, b3); \ REAL_DOT_FLOAT4(rdot4, a4, b4); \ REAL_DOT_FLOAT4(rdot5, a5, b5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_OPERATION(i) (ac*bc*rdot0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductHSt##suffix #define REDUCE_TYPES Float *aN, Float *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \ REAL_DOT_FLOAT2(rdot0, a0, b0); \ REAL_DOT_FLOAT2(rdot1, a1, b1); \ REAL_DOT_FLOAT2(rdot2, a2, b2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_OPERATION(i) (ac*bc*rdot0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double reDotProductCuda(cudaColorSpinorField &a, cudaColorSpinorField &b) { if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) { return reDotProductCuda(a.Even(), b.Even()) + reDotProductCuda(a.Odd(), b.Odd()); } const int id = 15; quda::blas_flops += 2*a.RealLength(); checkSpinor(a, b); quda::blas_bytes += 2*a.RealLength()*a.Precision(); if (a.Precision() == QUDA_DOUBLE_PRECISION) { return reDotProductDCuda((double*)a.V(), (double*)b.V(), a.Length(), id, a.Precision()); } else if (a.Precision() == QUDA_SINGLE_PRECISION) { return reDotProductSCuda((float2*)a.V(), (float2*)b.V(), a.Length()/2, id, a.Precision()); } else { quda::blas_bytes += 2*a.Volume()*sizeof(float); bindTexture(&a, &b); if (a.Nspin() == 4){ //wilson return reDotProductHCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else if (a.Nspin() == 1){ //staggered return reDotProductHStCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin()); return 0; } } } // // double axpyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y #define REDUCE_PARAMS a, x, y #define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i] #define REDUCE_OPERATION(i) (y[i]*y[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, 
typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix #define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ AXPY_FLOAT4(a, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ AXPY_FLOAT4(a, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ AXPY_FLOAT4(a, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ AXPY_FLOAT4(a, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ AXPY_FLOAT4(a, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ AXPY_FLOAT4(a, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix #define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ AXPY_FLOAT2(a, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ AXPY_FLOAT2(a, x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ AXPY_FLOAT2(a, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double axpyNormCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return axpyNormCuda(a, x.Even(), y.Even()) + axpyNormCuda(a, x.Odd(), y.Odd()); const int id = 16; quda::blas_flops += 4*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 3*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { return axpyNormFCuda(a, (double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return axpyNormFCuda((float)a, (float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return axpyNormHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return axpyNormHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double xmyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = x[i] - y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) xmyNormF##suffix #define REDUCE_TYPES Float *x, Float *y #define REDUCE_PARAMS x, y #define REDUCE_AUXILIARY(i) y[i] = x[i] - y[i] #define REDUCE_OPERATION(i) (y[i]*y[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix #define REDUCE_TYPES 
Float *d1, Float *d2, short4 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i)					\
  RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride);	\
  RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride);	\
  XMY_FLOAT4(x0, y0);						\
  REAL_DOT_FLOAT4(norm0, y0, y0);				\
  XMY_FLOAT4(x1, y1);						\
  REAL_DOT_FLOAT4(norm1, y1, y1);				\
  XMY_FLOAT4(x2, y2);						\
  REAL_DOT_FLOAT4(norm2, y2, y2);				\
  XMY_FLOAT4(x3, y3);						\
  REAL_DOT_FLOAT4(norm3, y3, y3);				\
  XMY_FLOAT4(x4, y4);						\
  REAL_DOT_FLOAT4(norm4, y4, y4);				\
  XMY_FLOAT4(x5, y5);						\
  REAL_DOT_FLOAT4(norm5, y5, y5);				\
  norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \
  CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION

template <unsigned int reduce_threads, typename Float>
#define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix
#define REDUCE_TYPES Float *d1, Float *d2, short2 *yH, float *yN, int stride
#define REDUCE_PARAMS d1, d2, yH, yN, stride
#define REDUCE_AUXILIARY(i)					\
  RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride);	\
  RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride);	\
  XMY_FLOAT2(x0, y0);						\
  REAL_DOT_FLOAT2(norm0, y0, y0);				\
  XMY_FLOAT2(x1, y1);						\
  REAL_DOT_FLOAT2(norm1, y1, y1);				\
  XMY_FLOAT2(x2, y2);						\
  REAL_DOT_FLOAT2(norm2, y2, y2);				\
  norm0 += norm1; norm0 += norm2;				\
  CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride);
#define REDUCE_OPERATION(i) (norm0)
#include "reduce_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_AUXILIARY
#undef REDUCE_OPERATION

double xmyNormCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) {
  if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET)
    return xmyNormCuda(x.Even(), y.Even()) + xmyNormCuda(x.Odd(), y.Odd());

  const int id = 17;
  quda::blas_flops += 3*x.RealLength();
  checkSpinor(x,y);
  quda::blas_bytes += 3*x.RealLength()*x.Precision();
  if (x.Precision() == QUDA_DOUBLE_PRECISION) {
    return xmyNormFCuda((double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision());
  } else if (x.Precision() == QUDA_SINGLE_PRECISION) {
    return xmyNormFCuda((float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision());
  } else {
    bindTexture(&x, &y);
    quda::blas_bytes += 3*x.Volume()*sizeof(float);
    if (x.Nspin() ==4 ){ //wilson
      return xmyNormHCuda((char*)0, (char*)0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
    }else if (x.Nspin() == 1){
      return xmyNormHCuda((char*)0, (char*)0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision());
    }else{
      errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin());
    }
  }
  exit(-1);
}

//
// double2 cDotProductCuda(float2 *x, float2 *y, int n) {}
//
template <unsigned int reduce_threads, typename Float, typename Float2>
#define REDUCE_FUNC_NAME(suffix) cDotProductD##suffix
#define REDUCE_TYPES Float2 *x, Float2 *y, Float c
#define REDUCE_PARAMS x, y, c
#define REDUCE_REAL_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i);
#define REDUCE_IMAG_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i);
#define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y)
#define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x)
#include "reduce_complex_core.h"
#undef REDUCE_FUNC_NAME
#undef REDUCE_TYPES
#undef REDUCE_PARAMS
#undef REDUCE_REAL_AUXILIARY
#undef REDUCE_IMAG_AUXILIARY
#undef REDUCE_REAL_OPERATION
#undef REDUCE_IMAG_OPERATION

template <unsigned int reduce_threads, typename Float, typename
Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductS##suffix #define REDUCE_TYPES Float2 *x, Float2 *y, Float c #define REDUCE_PARAMS x, y, c #define REDUCE_REAL_AUXILIARY(i) Float2 a = read_Float2(x, i); #define REDUCE_IMAG_AUXILIARY(i) Float2 b = read_Float2(y, i); #define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductH##suffix #define REDUCE_TYPES Float *aN, Float2 *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_REAL_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ READ_HALF_SPINOR(b, texHalf2, stride); \ REAL_DOT_FLOAT4(rdot0, a0, b0); \ REAL_DOT_FLOAT4(rdot1, a1, b1); \ REAL_DOT_FLOAT4(rdot2, a2, b2); \ REAL_DOT_FLOAT4(rdot3, a3, b3); \ REAL_DOT_FLOAT4(rdot4, a4, b4); \ REAL_DOT_FLOAT4(rdot5, a5, b5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, a0, b0); \ IMAG_DOT_FLOAT4(idot1, a1, b1); \ IMAG_DOT_FLOAT4(idot2, a2, b2); \ IMAG_DOT_FLOAT4(idot3, a3, b3); \ IMAG_DOT_FLOAT4(idot4, a4, b4); \ IMAG_DOT_FLOAT4(idot5, a5, b5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0) #define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductHSt##suffix #define REDUCE_TYPES Float *aN, Float2 *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_REAL_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \ REAL_DOT_FLOAT2(rdot0, a0, b0); \ REAL_DOT_FLOAT2(rdot1, a1, b1); \ REAL_DOT_FLOAT2(rdot2, a2, b2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, a0, b0); \ IMAG_DOT_FLOAT2(idot1, a1, b1); \ IMAG_DOT_FLOAT2(idot2, a2, b2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0) #define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex cDotProductCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductCuda(x.Even(), y.Even()) + cDotProductCuda(x.Odd(), y.Odd()); const int id = 18; quda::blas_flops += 4*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { char c = 0; bindTexture(&x, &y); dot = cDotProductDCuda((double2*)x.V(), (double2*)y.V(), c, length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { char c = 0; // FIXME: bindTexture() will incorrectly bind this as float4 since it queries the spin int spinor_bytes = x.Length()*sizeof(float); hipBindTexture(0, 
xTexSingle2, x.V(), spinor_bytes); hipBindTexture(0, yTexSingle2, y.V(), spinor_bytes); dot = cDotProductSCuda((float2*)x.V(), (float2*)y.V(), c, length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ dot = cDotProductHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ dot = cDotProductHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } return quda::Complex(dot.x, dot.y); } // // double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {} // // First performs the operation y = x + a*y // Second returns complex dot product (z,y) // template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyD##suffix #define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z #define REDUCE_PARAMS x, a, y, z #define REDUCE_REAL_AUXILIARY(i) \ Float2 X = READ_DOUBLE2_TEXTURE(x, i); \ Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \ Float2 Z = READ_DOUBLE2_TEXTURE(z, i); #define REDUCE_IMAG_AUXILIARY(i) y[i].x = X.x + a*Y.x; y[i].y = X.y + a*Y.y #define REDUCE_REAL_OPERATION(i) (Z.x*y[i].x + Z.y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (Z.x*y[i].y - Z.y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyS##suffix #define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z #define REDUCE_PARAMS x, a, y, z #define REDUCE_REAL_AUXILIARY(i) y[i].x = x[i].x + a*y[i].x #define REDUCE_IMAG_AUXILIARY(i) y[i].y = x[i].y + a*y[i].y #define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix #define REDUCE_TYPES Float a, short4 *yH, Float2 *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ XPAY_FLOAT4(x0, a, y0); \ XPAY_FLOAT4(x1, a, y1); \ XPAY_FLOAT4(x2, a, y2); \ XPAY_FLOAT4(x3, a, y3); \ XPAY_FLOAT4(x4, a, y4); \ XPAY_FLOAT4(x5, a, y5); \ REAL_DOT_FLOAT4(rdot0, z0, y0); \ REAL_DOT_FLOAT4(rdot1, z1, y1); \ REAL_DOT_FLOAT4(rdot2, z2, y2); \ REAL_DOT_FLOAT4(rdot3, z3, y3); \ REAL_DOT_FLOAT4(rdot4, z4, y4); \ REAL_DOT_FLOAT4(rdot5, z5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, z0, y0); \ IMAG_DOT_FLOAT4(idot1, z1, y1); \ IMAG_DOT_FLOAT4(idot2, z2, y2); \ IMAG_DOT_FLOAT4(idot3, z3, y3); \ IMAG_DOT_FLOAT4(idot4, z4, y4); \ IMAG_DOT_FLOAT4(idot5, z5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define 
REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix #define REDUCE_TYPES Float a, short2 *yH, Float2 *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ XPAY_FLOAT2(x0, a, y0); \ XPAY_FLOAT2(x1, a, y1); \ XPAY_FLOAT2(x2, a, y2); \ REAL_DOT_FLOAT2(rdot0, z0, y0); \ REAL_DOT_FLOAT2(rdot1, z1, y1); \ REAL_DOT_FLOAT2(rdot2, z2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, z0, y0); \ IMAG_DOT_FLOAT2(idot1, z1, y1); \ IMAG_DOT_FLOAT2(idot2, z2, y2); \ idot0 += idot1; idot0 += idot2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex xpaycDotzyCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return xpaycDotzyCuda(x.Even(), a, y.Even(), z.Even()) + xpaycDotzyCuda(x.Odd(), a, y.Odd(), z.Odd()); const int id = 19; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; quda::blas_bytes += 4*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y, &z); dot = xpaycDotzyDCuda((double2*)x.V(), a, (double2*)y.V(), (double2*)z.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { dot = xpaycDotzySCuda((float2*)x.V(), (float)a, (float2*)y.V(), (float2*)z.V(), length, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); if (x.Nspin() ==4 ){//wilson dot = xpaycDotzyHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() ==1 ){//wilson dot = xpaycDotzyHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } return quda::Complex(dot.x, dot.y); } // // double3 cDotProductNormACuda(float2 *a, float2 *b, int n) {} // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAD##suffix #define REDUCE_TYPES Float2 *x, Float2 *y #define REDUCE_PARAMS x, y #define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i); #define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i); #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x) #define REDUCE_Z_OPERATION(i) (a.x*a.x + a.y*a.y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef 
REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAS##suffix #define REDUCE_TYPES Float2 *a, Float2 *b #define REDUCE_PARAMS a, b #define REDUCE_X_AUXILIARY(i) #define REDUCE_Y_AUXILIARY(i) #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x) #define REDUCE_Z_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAH##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR(x, texHalf1, stride); \ READ_HALF_SPINOR(y, texHalf2, stride); \ REAL_DOT_FLOAT4(norm0, x0, x0); \ REAL_DOT_FLOAT4(norm1, x1, x1); \ REAL_DOT_FLOAT4(norm2, x2, x2); \ REAL_DOT_FLOAT4(norm3, x3, x3); \ REAL_DOT_FLOAT4(norm4, x4, x4); \ REAL_DOT_FLOAT4(norm5, x5, x5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, x0, y0); \ REAL_DOT_FLOAT4(rdot1, x1, y1); \ REAL_DOT_FLOAT4(rdot2, x2, y2); \ REAL_DOT_FLOAT4(rdot3, x3, y3); \ REAL_DOT_FLOAT4(rdot4, x4, y4); \ REAL_DOT_FLOAT4(rdot5, x5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, x0, y0); \ IMAG_DOT_FLOAT4(idot1, x1, y1); \ IMAG_DOT_FLOAT4(idot2, x2, y2); \ IMAG_DOT_FLOAT4(idot3, x3, y3); \ IMAG_DOT_FLOAT4(idot4, x4, y4); \ IMAG_DOT_FLOAT4(idot5, x5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (xc*xc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAHSt##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \ REAL_DOT_FLOAT2(norm0, x0, x0); \ REAL_DOT_FLOAT2(norm1, x1, x1); \ REAL_DOT_FLOAT2(norm2, x2, x2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, x0, y0); \ REAL_DOT_FLOAT2(rdot1, x1, y1); \ REAL_DOT_FLOAT2(rdot2, x2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, x0, y0); \ IMAG_DOT_FLOAT2(idot1, x1, y1); \ IMAG_DOT_FLOAT2(idot2, x2, y2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (xc*xc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef 
REDUCE_Z_OPERATION double3 cDotProductNormACuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductNormACuda(x.Even(), y.Even()) + cDotProductNormACuda(x.Odd(), y.Odd()); const int id = 20; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y); return cDotProductNormADCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return cDotProductNormASCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return cDotProductNormAHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ //staggered return cDotProductNormAHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } // // double3 cDotProductNormBCuda(float2 *a, float2 *b, int n) {} // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBD##suffix #define REDUCE_TYPES Float2 *x, Float2 *y #define REDUCE_PARAMS x, y #define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i); #define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i); #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x) #define REDUCE_Z_OPERATION(i) (b.x*b.x + b.y*b.y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBS##suffix #define REDUCE_TYPES Float2 *a, Float2 *b #define REDUCE_PARAMS a, b #define REDUCE_X_AUXILIARY(i) #define REDUCE_Y_AUXILIARY(i) #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x) #define REDUCE_Z_OPERATION(i) (b[i].x*b[i].x + b[i].y*b[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBH##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR(x, texHalf1, stride); \ READ_HALF_SPINOR(y, texHalf2, stride); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, x0, y0); \ REAL_DOT_FLOAT4(rdot1, x1, y1); \ REAL_DOT_FLOAT4(rdot2, x2, y2); \ REAL_DOT_FLOAT4(rdot3, x3, y3); \ REAL_DOT_FLOAT4(rdot4, x4, y4); \ REAL_DOT_FLOAT4(rdot5, x5, y5); \ rdot0 += rdot1; rdot2 
+= rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, x0, y0); \ IMAG_DOT_FLOAT4(idot1, x1, y1); \ IMAG_DOT_FLOAT4(idot2, x2, y2); \ IMAG_DOT_FLOAT4(idot3, x3, y3); \ IMAG_DOT_FLOAT4(idot4, x4, y4); \ IMAG_DOT_FLOAT4(idot5, x5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (yc*yc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBHSt##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, x0, y0); \ REAL_DOT_FLOAT2(rdot1, x1, y1); \ REAL_DOT_FLOAT2(rdot2, x2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, x0, y0); \ IMAG_DOT_FLOAT2(idot1, x1, y1); \ IMAG_DOT_FLOAT2(idot2, x2, y2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (yc*yc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION double3 cDotProductNormBCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductNormBCuda(x.Even(), y.Even()) + cDotProductNormBCuda(x.Odd(), y.Odd()); const int id = 21; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y); return cDotProductNormBDCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return cDotProductNormBSCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return cDotProductNormBHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ //staggered return cDotProductNormBHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } // // double3 caxpbypzYmbwcDotProductWYNormYCuda(float2 a, float2 *x, float2 b, float2 *y, // float2 *z, float2 *w, float2 *u, int len) // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYD##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u #define REDUCE_PARAMS a, x, b, y, z, w, u #define REDUCE_X_AUXILIARY(i) \ Float2 X = READ_DOUBLE2_TEXTURE(x, i); \ 
Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \ Float2 W = READ_DOUBLE2_TEXTURE(w, i); #define REDUCE_Y_AUXILIARY(i) \ Float2 Z = read_Float2(z, i); \ Z.x += a.x*X.x - a.y*X.y; \ Z.y += a.y*X.x + a.x*X.y; \ Z.x += b.x*Y.x - b.y*Y.y; \ Z.y += b.y*Y.x + b.x*Y.y; \ Y.x -= b.x*W.x - b.y*W.y; \ Y.y -= b.y*W.x + b.x*W.y; #define REDUCE_Z_AUXILIARY(i) \ z[i] = make_Float2(Z); \ y[i] = make_Float2(Y); #define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y) #define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x) #define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYS##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u #define REDUCE_PARAMS a, x, b, y, z, w, u #define REDUCE_X_AUXILIARY(i) \ Float2 X = read_Float2(x, i); \ Float2 Y = read_Float2(y, i); \ Float2 W = read_Float2(w, i); #define REDUCE_Y_AUXILIARY(i) \ Float2 Z = read_Float2(z, i); \ Z.x += a.x*X.x - a.y*X.y; \ Z.y += a.y*X.x + a.x*X.y; \ Z.x += b.x*Y.x - b.y*Y.y; \ Z.y += b.y*Y.x + b.x*Y.y; \ Y.x -= b.x*W.x - b.y*W.y; \ Y.y -= b.y*W.x + b.x*W.y; #define REDUCE_Z_AUXILIARY(i) \ z[i] = make_Float2(Z); \ y[i] = make_Float2(Y); #define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y) #define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x) #define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION // // double3 caxpbypzYmbwcDotProductWYNormYCuda(float2 a, float2 *x, float2 b, float2 *y, // float2 *z, float2 *w, float2 *u, int len) // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix #define REDUCE_TYPES Float2 a, Float2 b, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, float *uN, int stride #define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride #define REDUCE_X_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); \ CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); \ CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); \ CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); \ CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); \ CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); \ READ_HALF_SPINOR(w, texHalf4, stride); \ float2 bwc = -wc*b; \ CAXPY_FLOAT4(bwc, w0, y0); \ CAXPY_FLOAT4(bwc, w1, y1); \ CAXPY_FLOAT4(bwc, w2, y2); \ CAXPY_FLOAT4(bwc, w3, y3); \ CAXPY_FLOAT4(bwc, w4, y4); \ CAXPY_FLOAT4(bwc, w5, y5); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_Y_AUXILIARY(i) \ READ_HALF_SPINOR(u, texHalf5, stride); \ REAL_DOT_FLOAT4(rdot0, u0, y0); \ REAL_DOT_FLOAT4(rdot1, u1, y1); \ 
REAL_DOT_FLOAT4(rdot2, u2, y2); \ REAL_DOT_FLOAT4(rdot3, u3, y3); \ REAL_DOT_FLOAT4(rdot4, u4, y4); \ REAL_DOT_FLOAT4(rdot5, u5, y5); \ IMAG_DOT_FLOAT4(idot0, u0, y0); \ IMAG_DOT_FLOAT4(idot1, u1, y1); \ IMAG_DOT_FLOAT4(idot2, u2, y2); \ IMAG_DOT_FLOAT4(idot3, u3, y3); \ IMAG_DOT_FLOAT4(idot4, u4, y4); \ IMAG_DOT_FLOAT4(idot5, u5, y5); #define REDUCE_Z_AUXILIARY(i) \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (uc*rdot0) #define REDUCE_Y_OPERATION(i) (uc*idot0) #define REDUCE_Z_OPERATION(i) (norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix #define REDUCE_TYPES Float2 a, Float2 b, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, float *uN, int stride #define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride #define REDUCE_X_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); \ CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); \ CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); \ READ_HALF_SPINOR_ST(w, texHalfSt4, stride); \ float2 bwc = -wc*b; \ CAXPY_FLOAT2(bwc, w0, y0); \ CAXPY_FLOAT2(bwc, w1, y1); \ CAXPY_FLOAT2(bwc, w2, y2); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_Y_AUXILIARY(i) \ READ_HALF_SPINOR_ST(u, texHalfSt5, stride); \ REAL_DOT_FLOAT2(rdot0, u0, y0); \ REAL_DOT_FLOAT2(rdot1, u1, y1); \ REAL_DOT_FLOAT2(rdot2, u2, y2); \ IMAG_DOT_FLOAT2(idot0, u0, y0); \ IMAG_DOT_FLOAT2(idot1, u1, y1); \ IMAG_DOT_FLOAT2(idot2, u2, y2); #define REDUCE_Z_AUXILIARY(i) \ norm0 += norm1; norm0 += norm2; \ rdot0 += rdot1; rdot0 += rdot2; \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (uc*rdot0) #define REDUCE_Y_OPERATION(i) (uc*idot0) #define REDUCE_Z_OPERATION(i) (norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION // This convoluted kernel does the following: z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y) double3 caxpbypzYmbwcDotProductUYNormYCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w, cudaColorSpinorField &u) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpbypzYmbwcDotProductUYNormYCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even(), u.Even()) + caxpbypzYmbwcDotProductUYNormYCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd(), u.Odd()); const int id = 22; quda::blas_flops += 18*x.RealLength(); checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); checkSpinor(x,u); int length = x.Length()/2; quda::blas_bytes += 7*x.RealLength()*x.Precision(); if 
(x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z, &w, &u); return caxpbypzYmbwcDotProductUYNormYDCuda(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), (double2*)w.V(), (double2*)u.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); return caxpbypzYmbwcDotProductUYNormYSCuda(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), (float2*)w.V(), (float2*)u.V(), length, id, x.Precision()); } else { // fused nSpin=4 kernel is slow on Fermi // N.B. this introduces an extra half truncation so will affect convergence (for the better?) if (!blasTuning && (__COMPUTE_CAPABILITY__ >= 200) && x.Nspin() == 4) { caxpbypzYmbwCuda(a, x, b, y, z, w); return cDotProductNormBCuda(u, y); } bindTexture(&x, &y, &z, &w, &u); quda::blas_bytes += 7*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4) { // wilson return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(), y.Stride(), y.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ // staggered return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(), y.Stride(), y.Volume(), id, x.Precision()); } else { errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } template <typename Float, typename Float2> __global__ void cabxpyAxKernel(Float a, Float2 b, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { x[i].x *= a; x[i].y *= a; y[i].x += b.x*x[i].x - b.y*x[i].y; y[i].y += b.y*x[i].x + b.x*x[i].y; i += gridSize; } } __global__ void cabxpyAxHKernel(float a, float2 b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2); AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); CAXPY_FLOAT4(b, x0, y0); CAXPY_FLOAT4(b, x1, y1); CAXPY_FLOAT4(b, x2, y2); CAXPY_FLOAT4(b, x3, y3); CAXPY_FLOAT4(b, x4, y4); CAXPY_FLOAT4(b, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void cabxpyAxHKernel(float a, float2 b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); CAXPY_FLOAT2(b, x0, y0); CAXPY_FLOAT2(b, x1, y1); CAXPY_FLOAT2(b, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] += a*b*x[i], x[i] *= a void cabxpyAxCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) { 
checkSpinor(x,y); int length = x.Length()/2; setBlock(23, length, x.Precision()); quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 5*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 b2 = make_double2(real(b), imag(b)); hipLaunchKernelGGL(( cabxpyAxKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (double)a, b2, (double2*)x.V(), (double2*)y.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 b2 = make_float2(real(b), imag(b)); hipLaunchKernelGGL(( cabxpyAxKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (float2*)x.V(), (float2*)y.V(), length); } else { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpyCuda(a, x.Even(), y.Even()); caxpyCuda(a, x.Odd(), y.Odd()); return; } bindTexture(&x, &y); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( cabxpyAxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); } else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( cabxpyAxHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, (float)a, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 4*x.Volume()*sizeof(float); } if (!blasTuning) checkCudaError(); } // // double caxpyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y #define REDUCE_PARAMS a, x, y #define REDUCE_AUXILIARY(i) \ y[i].x += a.x*x[i].x - a.y*x[i].y; \ y[i].y += a.y*x[i].x + a.x*x[i].y #define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix #define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ CAXPY_FLOAT4(a, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ CAXPY_FLOAT4(a, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ CAXPY_FLOAT4(a, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ CAXPY_FLOAT4(a, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ CAXPY_FLOAT4(a, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ CAXPY_FLOAT4(a, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix #define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ CAXPY_FLOAT2(a, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ CAXPY_FLOAT2(a, x1, y1); \ 
REAL_DOT_FLOAT2(norm1, y1, y1); \ CAXPY_FLOAT2(a, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double caxpyNormCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyNormCuda(a, x.Even(), y.Even()) + caxpyNormCuda(a, x.Odd(), y.Odd()); const int id = 24; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 3*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); return caxpyNormFCuda(a2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); return caxpyNormFCuda(a2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson return caxpyNormHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return caxpyNormHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double caxpyXmayNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second performs the operator x[i] -= a*z[i] // Third returns the norm of x // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y, Float *z #define REDUCE_PARAMS a, x, y, z #define REDUCE_AUXILIARY(i) \ y[i].x += a.x*x[i].x - a.y*x[i].y; \ y[i].y += a.y*x[i].x + a.x*x[i].y; \ x[i].x += a.y*z[i].y - a.x*z[i].x; \ x[i].y -= (a.x*z[i].y + a.y*z[i].x); #define REDUCE_OPERATION(i) (x[i].x*x[i].x + x[i].y*x[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix #define REDUCE_TYPES Float a, short4 *xH, float *xN, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPY_FLOAT4(a, x0, y0); \ CMAXPY_FLOAT4(a, z0, x0); \ REAL_DOT_FLOAT4(norm0, x0, x0); \ CAXPY_FLOAT4(a, x1, y1); \ CMAXPY_FLOAT4(a, z1, x1); \ REAL_DOT_FLOAT4(norm1, x1, x1); \ CAXPY_FLOAT4(a, x2, y2); \ CMAXPY_FLOAT4(a, z2, x2); \ REAL_DOT_FLOAT4(norm2, x2, x2); \ CAXPY_FLOAT4(a, x3, y3); \ CMAXPY_FLOAT4(a, z3, x3); \ REAL_DOT_FLOAT4(norm3, x3, x3); \ CAXPY_FLOAT4(a, x4, y4); \ CMAXPY_FLOAT4(a, z4, x4); \ REAL_DOT_FLOAT4(norm4, x4, x4); \ CAXPY_FLOAT4(a, x5, y5); \ CMAXPY_FLOAT4(a, z5, x5); \ REAL_DOT_FLOAT4(norm5, x5, x5); \ norm0 += norm1; norm2 += norm3; \ norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); #define 
REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix #define REDUCE_TYPES Float a, short2 *xH, float *xN, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPY_FLOAT2(a, x0, y0); \ CMAXPY_FLOAT2(a, z0, x0); \ REAL_DOT_FLOAT2(norm0, x0, x0); \ CAXPY_FLOAT2(a, x1, y1); \ CMAXPY_FLOAT2(a, z1, x1); \ REAL_DOT_FLOAT2(norm1, x1, x1); \ CAXPY_FLOAT2(a, x2, y2); \ CMAXPY_FLOAT2(a, z2, x2); \ REAL_DOT_FLOAT2(norm2, x2, x2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double caxpyXmazNormXCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyXmazNormXCuda(a, x.Even(), y.Even(), z.Even()) + caxpyXmazNormXCuda(a, x.Odd(), y.Odd(), z.Odd()); const int id = 25; quda::blas_flops += 10*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 5*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); return caxpyXmazNormXFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); return caxpyXmazNormXFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson return caxpyXmazNormXHCuda(a2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return caxpyXmazNormXHCuda(a2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double cabxpyAxNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormF##suffix #define REDUCE_TYPES Float a, Float b, Float *x, Float *y #define REDUCE_PARAMS a, b, x, y #define REDUCE_AUXILIARY(i) \ x[i].x *= a.x; \ x[i].y *= a.x; \ y[i].x += b.x*x[i].x - b.y*x[i].y; \ y[i].y += b.y*x[i].x + b.x*x[i].y; #define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix #define REDUCE_TYPES Float a, Float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, b, xH, 
xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ AX_FLOAT4(a.x, x0); \ AX_FLOAT4(a.x, x1); \ AX_FLOAT4(a.x, x2); \ AX_FLOAT4(a.x, x3); \ AX_FLOAT4(a.x, x4); \ AX_FLOAT4(a.x, x5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); \ CAXPY_FLOAT4(b, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ CAXPY_FLOAT4(b, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ CAXPY_FLOAT4(b, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ CAXPY_FLOAT4(b, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ CAXPY_FLOAT4(b, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ CAXPY_FLOAT4(b, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix #define REDUCE_TYPES Float a, Float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ AX_FLOAT2(a.x, x0); \ AX_FLOAT2(a.x, x1); \ AX_FLOAT2(a.x, x2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); \ CAXPY_FLOAT2(b, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ CAXPY_FLOAT2(b, x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ CAXPY_FLOAT2(b, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double cabxpyAxNormCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cabxpyAxNormCuda(a, b, x.Even(), y.Even()) + cabxpyAxNormCuda(a, b, x.Odd(), y.Odd()); const int id = 26; quda::blas_flops += 7*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 4*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(a, 0); double2 b2 = make_double2(real(b), imag(b)); return cabxpyAxNormFCuda(a2, b2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(a, 0); float2 b2 = make_float2(real(b), imag(b)); return cabxpyAxNormFCuda(a2, b2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(a, 0); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ //wilson return cabxpyAxNormHCuda(a2, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return cabxpyAxNormHCuda(a2, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } template <typename Float2> __global__ void caxpbypzDKernel(Float2 a, Float2 *x, Float2 b, Float2 
*y, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = READ_DOUBLE2_TEXTURE(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = READ_DOUBLE2_TEXTURE(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); i += gridSize; } } template <typename Float2> __global__ void caxpbypzSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = read_Float2(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = read_Float2(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); i += gridSize; } } __global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); i += gridSize; } } __global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + z[i] void caxpbypzCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z) { checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; setBlock(27, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypzCuda(a, x.Even(), b, y.Even(), z.Even()); caxpbypzCuda(a, x.Odd(), b, y.Odd(), z.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z); hipLaunchKernelGGL(( caxpbypzDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); hipLaunchKernelGGL(( caxpbypzSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), length); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( caxpbypzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), 
(short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( caxpbypzHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpbypczpwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 c, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 W = read_Float2(w, i); Float2 X = READ_DOUBLE2_TEXTURE(x, i); CAXPY_DOUBLE2(a, X, W); Float2 Y = READ_DOUBLE2_TEXTURE(y, i); CAXPY_DOUBLE2(b, Y, W); Float2 Z = READ_DOUBLE2_TEXTURE(z, i); CAXPY_DOUBLE2(c, Z, W); w[i] = make_Float2(W); i += gridSize; } } template <typename Float2> __global__ void caxpbypczpwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 c, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 W = read_Float2(w, i); Float2 X = read_Float2(x, i); CAXPY_FLOAT2(a, X, W); Float2 Y = read_Float2(y, i); CAXPY_FLOAT2(b, Y, W); Float2 Z = read_Float2(z, i); CAXPY_FLOAT2(c, Z, W); w[i] = make_Float2(W); i += gridSize; } } __global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, short4* wH, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(w, texHalf4, texNorm4, stride); RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); CAXPY_FLOAT4(a, x0, w0); CAXPY_FLOAT4(a, x1, w1); CAXPY_FLOAT4(a, x2, w2); CAXPY_FLOAT4(a, x3, w3); CAXPY_FLOAT4(a, x4, w4); CAXPY_FLOAT4(a, x5, w5); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); CAXPY_FLOAT4(b, y0, w0); CAXPY_FLOAT4(b, y1, w1); CAXPY_FLOAT4(b, y2, w2); CAXPY_FLOAT4(b, y3, w3); CAXPY_FLOAT4(b, y4, w4); CAXPY_FLOAT4(b, y5, w5); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPY_FLOAT4(c, z0, w0); CAXPY_FLOAT4(c, z1, w1); CAXPY_FLOAT4(c, z2, w2); CAXPY_FLOAT4(c, z3, w3); CAXPY_FLOAT4(c, z4, w4); CAXPY_FLOAT4(c, z5, w5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(wH, wN, w, stride); i += gridSize; } } __global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, short2 *wH, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(w, texHalfSt4, texNorm4, stride); RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); CAXPY_FLOAT2(a, x0, w0); CAXPY_FLOAT2(a, x1, w1); CAXPY_FLOAT2(a, x2, w2); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); CAXPY_FLOAT2(b, y0, w0); CAXPY_FLOAT2(b, y1, w1); CAXPY_FLOAT2(b, y2, w2); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPY_FLOAT2(c, z0, w0); CAXPY_FLOAT2(c, z1, w1); CAXPY_FLOAT2(c, z2, w2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(wH, wN, w, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + c*z[i] + w[i] void caxpbypczpwCuda(const quda::Complex &a, 
cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, const quda::Complex &c, cudaColorSpinorField &z, cudaColorSpinorField &w) { checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); int length = x.Length()/2; setBlock(28, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypczpwCuda(a, x.Even(), b, y.Even(), c, z.Even(), w.Even()); caxpbypczpwCuda(a, x.Odd(), b, y.Odd(), c, z.Odd(), w.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); double2 c2 = make_double2(real(c), imag(c)); bindTexture(&x, &y, &z, &w); hipLaunchKernelGGL(( caxpbypczpwDKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (double2*)x.V(), b2, (double2*)y.V(), c2, (double2*)z.V(), (double2*)w.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); float2 c2 = make_float2(real(c), imag(c)); hipLaunchKernelGGL(( caxpbypczpwSKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, (float2*)x.V(), b2, (float2*)y.V(), c2, (float2*)z.V(), (float2*)w.V(), length); } else { bindTexture(&x, &y, &z, &w); quda::blas_bytes += 6*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); float2 c2 = make_float2(real(c), imag(c)); if (x.Nspin() == 4){ //wilson hipLaunchKernelGGL(( caxpbypczpwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, c2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (short4*)w.V(), (float*)w.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered hipLaunchKernelGGL(( caxpbypczpwHKernel), dim3(blasGrid), dim3(blasBlock), 0, 0, a2, b2, c2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (short2*)w.V(), (float*)w.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 12*x.RealLength(); if (!blasTuning) checkCudaError(); } // // double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the dot product (z,y) // template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyF##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 *y, Float2 *z, Float c #define REDUCE_PARAMS a, x, y, z, c #define REDUCE_REAL_AUXILIARY(i) y[i].x += a.x*x[i].x - a.y*x[i].y; #define REDUCE_IMAG_AUXILIARY(i) y[i].y += a.y*x[i].x + a.x*x[i].y; #define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix #define REDUCE_TYPES Float2 a, short4 *yH, Float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPY_FLOAT4(a, x0, y0); \ CAXPY_FLOAT4(a, x1, y1); \ CAXPY_FLOAT4(a, x2, y2); \ 
CAXPY_FLOAT4(a, x3, y3); \ CAXPY_FLOAT4(a, x4, y4); \ CAXPY_FLOAT4(a, x5, y5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_IMAG_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, z0, y0); \ REAL_DOT_FLOAT4(rdot1, z1, y1); \ REAL_DOT_FLOAT4(rdot2, z2, y2); \ REAL_DOT_FLOAT4(rdot3, z3, y3); \ REAL_DOT_FLOAT4(rdot4, z4, y4); \ REAL_DOT_FLOAT4(rdot5, z5, y5); \ IMAG_DOT_FLOAT4(idot0, z0, y0); \ IMAG_DOT_FLOAT4(idot1, z1, y1); \ IMAG_DOT_FLOAT4(idot2, z2, y2); \ IMAG_DOT_FLOAT4(idot3, z3, y3); \ IMAG_DOT_FLOAT4(idot4, z4, y4); \ IMAG_DOT_FLOAT4(idot5, z5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix #define REDUCE_TYPES Float2 a, short2 *yH, Float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPY_FLOAT2(a, x0, y0); \ CAXPY_FLOAT2(a, x1, y1); \ CAXPY_FLOAT2(a, x2, y2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_IMAG_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, z0, y0); \ REAL_DOT_FLOAT2(rdot1, z1, y1); \ REAL_DOT_FLOAT2(rdot2, z2, y2); \ IMAG_DOT_FLOAT2(idot0, z0, y0); \ IMAG_DOT_FLOAT2(idot1, z1, y1); \ IMAG_DOT_FLOAT2(idot2, z2, y2); \ rdot0 += rdot1; rdot0 += rdot2; \ idot0 += idot1; idot0 += idot2; #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex caxpyDotzyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyDotzyCuda(a, x.Even(), y.Even(), z.Even()) + caxpyDotzyCuda(a, x.Odd(), y.Odd(), z.Odd()); const int id = 29; quda::blas_flops += 8*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 4*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { char c = 0; double2 a2 = make_double2(real(a), imag(a)); dot = caxpyDotzyFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), c, x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { char c = 0; float2 a2 = make_float2(real(a), imag(a)); dot = caxpyDotzyFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), c, x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson dot = caxpyDotzyHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered dot = caxpyDotzyHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, 
x.Nspin()); } } return quda::Complex(dot.x, dot.y); }
d94976a9ab400cdd1edc51a7c8237bf851cafd92.cu
#include <stdlib.h> #include <stdio.h> #include <quda_internal.h> #include <blas_quda.h> #include <color_spinor_field.h> #include <face_quda.h> // this is where the MPI / QMP depdendent code is #include <cuComplex.h> #define REDUCE_MAX_BLOCKS 65536 #define REDUCE_DOUBLE 64 #define REDUCE_KAHAN 32 #if (__COMPUTE_CAPABILITY__ >= 130) #define REDUCE_TYPE REDUCE_DOUBLE #define QudaSumFloat double #define QudaSumComplex cuDoubleComplex #define QudaSumFloat3 double3 #else #define REDUCE_TYPE REDUCE_KAHAN #define QudaSumFloat float #define QudaSumComplex cuComplex #define QudaSumFloat3 float3 #endif // These are used for reduction kernels static QudaSumFloat *d_reduceFloat=0; static QudaSumComplex *d_reduceComplex=0; static QudaSumFloat3 *d_reduceFloat3=0; static QudaSumFloat *h_reduceFloat=0; static QudaSumComplex *h_reduceComplex=0; static QudaSumFloat3 *h_reduceFloat3=0; namespace quda { unsigned long long blas_flops; unsigned long long blas_bytes; } static dim3 blasBlock; static dim3 blasGrid; // generated by blas_test #include <blas_param.h> double2 operator+(const double2& x, const double2 &y) { return make_double2(x.x + y.x, x.y + y.y); } double3 operator+(const double3& x, const double3 &y) { double3 z; z.x = x.x + y.x; z.y = x.y + y.y; z.z = x.z + y.z; return z; } __device__ float2 operator*(const float a, const float2 x) { float2 y; y.x = a*x.x; y.y = a*x.y; return y; } template <typename Float2> __device__ Float2 operator+(const Float2 x, const Float2 y) { Float2 z; z.x = x.x + y.x; z.y = x.y + y.y; return z; } template <typename Float2> __device__ Float2 operator+=(Float2 &x, const Float2 y) { x.x += y.x; x.y += y.y; return x; } template <typename Float2> __device__ Float2 operator-=(Float2 &x, const Float2 y) { x.x -= y.x; x.y -= y.y; return x; } template <typename Float, typename Float2> __device__ Float2 operator*=(Float2 &x, const Float a) { x.x *= a; x.y *= a; return x; } template <typename Float> __device__ float4 operator*=(float4 &a, const Float &b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; return a; } void zeroCuda(cudaColorSpinorField &a) { a.zero(); } // blasTuning = 1 turns off error checking static QudaTune blasTuning = QUDA_TUNE_NO; namespace quda { void initBlas(void) { if (!d_reduceFloat) { if (cudaMalloc((void**) &d_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!d_reduceComplex) { if (cudaMalloc((void**) &d_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!d_reduceFloat3) { if (cudaMalloc((void**) &d_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating device reduction array"); } } if (!h_reduceFloat) { if (cudaMallocHost((void**) &h_reduceFloat, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } if (!h_reduceComplex) { if (cudaMallocHost((void**) &h_reduceComplex, REDUCE_MAX_BLOCKS*sizeof(QudaSumComplex)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } if (!h_reduceFloat3) { if (cudaMallocHost((void**) &h_reduceFloat3, REDUCE_MAX_BLOCKS*sizeof(QudaSumFloat3)) == cudaErrorMemoryAllocation) { errorQuda("Error allocating host reduction array"); } } } void endBlas(void) { if (d_reduceFloat) { cudaFree(d_reduceFloat); d_reduceFloat = 0; } if (d_reduceComplex) { cudaFree(d_reduceComplex); 
d_reduceComplex = 0; } if (d_reduceFloat3) { cudaFree(d_reduceFloat3); d_reduceFloat3 = 0; } if (h_reduceFloat) { cudaFreeHost(h_reduceFloat); h_reduceFloat = 0; } if (h_reduceComplex) { cudaFreeHost(h_reduceComplex); h_reduceComplex = 0; } if (h_reduceFloat3) { cudaFreeHost(h_reduceFloat3); h_reduceFloat3 = 0; } } void setBlasTuning(QudaTune tune) { blasTuning = tune; } void setBlasParam(int kernel, int prec, int threads, int blocks) { blas_threads[kernel][prec] = threads; blas_blocks[kernel][prec] = blocks; } } void setBlock(int kernel, int length, QudaPrecision precision) { int prec; switch(precision) { case QUDA_HALF_PRECISION: prec = 0; break; case QUDA_SINGLE_PRECISION: prec = 1; break; case QUDA_DOUBLE_PRECISION: prec = 2; break; } int blocks = min(blas_blocks[kernel][prec], max(length/blas_threads[kernel][prec], 1)); blasBlock.x = blas_threads[kernel][prec]; blasBlock.y = 1; blasBlock.z = 1; blasGrid.x = blocks; blasGrid.y = 1; blasGrid.z = 1; } #if (__COMPUTE_CAPABILITY__ >= 130) static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i) { int4 v = tex1Dfetch(t,i); return make_double2(__hiloint2double(v.y, v.x), __hiloint2double(v.w, v.z)); } #else static __inline__ __device__ double2 fetch_double2(texture<int4, 1> t, int i) { // do nothing return make_double2(0.0, 0.0); } #endif float2 __device__ read_Float2(float2 *x, int i) { return make_float2(x[i].x, x[i].y); } double2 __device__ read_Float2(double2 *x, int i) { return make_double2(x[i].x, x[i].y); } #if FERMI_NO_DBLE_TEX #define READ_DOUBLE2_TEXTURE(x, i) \ read_Float2(x, i) #else #define READ_DOUBLE2_TEXTURE(x, i) \ fetch_double2(x##TexDouble2, i) #endif #define READ_FLOAT2_TEXTURE(x, i) \ tex1Dfetch(x##TexSingle2, i) float2 __device__ make_Float2(float2 x) { return make_float2(x.x, x.y); } double2 __device__ make_Float2(double2 x) { return make_double2(x.x, x.y); } #define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float4 a##0 = tex1Dfetch(texHalf, i + 0*length); \ float4 a##1 = tex1Dfetch(texHalf, i + 1*length); \ float4 a##2 = tex1Dfetch(texHalf, i + 2*length); \ float4 a##3 = tex1Dfetch(texHalf, i + 3*length); \ float4 a##4 = tex1Dfetch(texHalf, i + 4*length); \ float4 a##5 = tex1Dfetch(texHalf, i + 5*length); \ a##0 *= a##c; \ a##1 *= a##c; \ a##2 *= a##c; \ a##3 *= a##c; \ a##4 *= a##c; \ a##5 *= a##c; #define RECONSTRUCT_HALF_SPINOR_ST(a, texHalf, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float2 a##0 = tex1Dfetch(texHalf, i + 0*length); \ float2 a##1 = tex1Dfetch(texHalf, i + 1*length); \ float2 a##2 = tex1Dfetch(texHalf, i + 2*length); \ (a##0) *= a##c; \ (a##1) *= a##c; \ (a##2) *= a##c; // Some musings on how to clean up the blas code using Boost /*#define BOOST_RECONSTRUCT_HALF_SPINOR(z, j, a, texHalf, length) \ float4 a##k tex1Dfetch(texHalf, i + j*length); \ a##k *= a##c; #define RECONSTRUCT_HALF_SPINOR(a, texHalf, texNorm, length) \ BOOST_PP_REPEAT(6, BOOST_RECONSTRUCT_HALF_SPINOR, a, texHalf, length) \ */ #define READ_HALF_SPINOR_TEX(a, tex, texNorm, length) \ float a##c = tex1Dfetch(texNorm, i); \ float4 a##0 = tex1Dfetch(tex, i + 0*length); \ float4 a##1 = tex1Dfetch(tex, i + 1*length); \ float4 a##2 = tex1Dfetch(tex, i + 2*length); \ float4 a##3 = tex1Dfetch(tex, i + 3*length); \ float4 a##4 = tex1Dfetch(tex, i + 4*length); \ float4 a##5 = tex1Dfetch(tex, i + 5*length); \ #define READ_HALF_SPINOR(a, tex, length) \ float4 a##0 = tex1Dfetch(tex, i + 0*length); \ float4 a##1 = tex1Dfetch(tex, i + 1*length); \ float4 a##2 = 
tex1Dfetch(tex, i + 2*length); \ float4 a##3 = tex1Dfetch(tex, i + 3*length); \ float4 a##4 = tex1Dfetch(tex, i + 4*length); \ float4 a##5 = tex1Dfetch(tex, i + 5*length); \ float a##c = a##N[i]; #define READ_HALF_SPINOR_ST(a, tex, length) \ float2 a##0 = tex1Dfetch(tex, i + 0*length); \ float2 a##1 = tex1Dfetch(tex, i + 1*length); \ float2 a##2 = tex1Dfetch(tex, i + 2*length); \ float a##c = a##N[i]; #define FAST_ABS_MAX(a, b) fmaxf(fabsf(a), fabsf(b)); #define FAST_MAX(a, b) fmaxf(a, b); __device__ float fast_abs_max(float4 a) { float c0 = FAST_ABS_MAX(a.x, a.y); float c1 = FAST_ABS_MAX(a.z, a.w); return FAST_MAX(c0, c1); } #define CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, n, a, length) { \ float c0 = fast_abs_max(a##0); \ float c1 = fast_abs_max(a##1); \ c0 = FAST_MAX(c0, c1); \ float c2 = fast_abs_max(a##2); \ float c3 = fast_abs_max(a##3); \ c1 = FAST_MAX(c2, c3); \ c0 = FAST_MAX(c0, c1); \ c2 = fast_abs_max(a##4); \ c3 = fast_abs_max(a##5); \ c1 = FAST_MAX(c2, c3); \ c0 = FAST_MAX(c0, c1); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \ (short)(C*(float)(a##0).z), (short)(C*(float)(a##0).w)); \ h[i+1*length] = make_short4((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y), \ (short)(C*(float)(a##1).z), (short)(C*(float)(a##1).w)); \ h[i+2*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \ (short)(C*(float)(a##2).z), (short)(C*(float)(a##2).w)); \ h[i+3*length] = make_short4((short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y), \ (short)(C*(float)(a##3).z), (short)(C*(float)(a##3).w)); \ h[i+4*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \ (short)(C*(float)(a##4).z), (short)(C*(float)(a##4).w)); \ h[i+5*length] = make_short4((short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y), \ (short)(C*(float)(a##5).z), (short)(C*(float)(a##5).w));} #define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ float c3 = fmaxf(fabsf((a##3).x), fabsf((a##3).y)); \ float c4 = fmaxf(fabsf((a##4).x), fabsf((a##4).y)); \ float c5 = fmaxf(fabsf((a##5).x), fabsf((a##5).y)); \ float c6 = fmaxf(fabsf((a##6).x), fabsf((a##6).y)); \ float c7 = fmaxf(fabsf((a##7).x), fabsf((a##7).y)); \ float c8 = fmaxf(fabsf((a##8).x), fabsf((a##8).y)); \ float c9 = fmaxf(fabsf((a##9).x), fabsf((a##9).y)); \ float c10 = fmaxf(fabsf((a##10).x), fabsf((a##10).y)); \ float c11 = fmaxf(fabsf((a##11).x), fabsf((a##11).y)); \ c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); c2 = fmaxf(c4, c5); c3 = fmaxf(c6, c7); \ c4 = fmaxf(c8, c9); c5 = fmaxf(c10, c11); c0 = fmaxf(c0, c1); c1 = fmaxf(c2, c3); \ c2 = fmaxf(c4, c5); c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short4((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y), \ (short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+1*length] = make_short4((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y), \ (short)(C*(float)(a##3).x), (short)(C*(float)(a##3).y)); \ h[i+2*length] = make_short4((short)(C*(float)(a##4).x), (short)(C*(float)(a##4).y), \ (short)(C*(float)(a##5).x), (short)(C*(float)(a##5).y)); \ h[i+3*length] = make_short4((short)(C*(float)(a##6).x), (short)(C*(float)(a##6).y), \ (short)(C*(float)(a##7).x), (short)(C*(float)(a##7).y)); \ h[i+4*length] = 
make_short4((short)(C*(float)(a##8).x), (short)(C*(float)(a##8).y), \ (short)(C*(float)(a##9).x), (short)(C*(float)(a##9).y)); \ h[i+5*length] = make_short4((short)(C*(float)(a##10).x), (short)(C*(float)(a##10).y), \ (short)(C*(float)(a##11).x), (short)(C*(float)(a##11).y));} #define CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \ h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));} #define CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, n, a, length) \ {float c0 = fmaxf(fabsf((a##0).x), fabsf((a##0).y)); \ float c1 = fmaxf(fabsf((a##1).x), fabsf((a##1).y)); \ float c2 = fmaxf(fabsf((a##2).x), fabsf((a##2).y)); \ c0 = fmaxf(c0, c1); c0 = fmaxf(c0, c2); \ n[i] = c0; \ float C = __fdividef(MAX_SHORT, c0); \ h[i+0*length] = make_short2((short)(C*(float)(a##0).x), (short)(C*(float)(a##0).y)); \ h[i+1*length] = make_short2((short)(C*(float)(a##1).x), (short)(C*(float)(a##1).y)); \ h[i+2*length] = make_short2((short)(C*(float)(a##2).x), (short)(C*(float)(a##2).y));} #define SUM_FLOAT4(sum, a) \ float sum = fabs(a.x) + fabs(a.y) + fabs(a.z) + fabs(a.w); #define SUM_FLOAT2(sum, a) \ float sum = fabs(a.x) + fabs(a.y); #if (__COMPUTE_CAPABILITY__ < 200) #define REAL_DOT_FLOAT4(dot, a, b) \ float dot = a.x*b.x + a.y*b.y + a.z*b.z + a.w*b.w; #else #define REAL_DOT_FLOAT4(dot, a, b) \ float dot = fmaf(a.x, b.x, 0.0f); \ dot = fmaf(a.y, b.y, dot); \ dot = fmaf(a.z, b.z, dot); \ dot = fmaf(a.w, b.w, dot) #endif #define REAL_DOT_FLOAT2(dot, a, b) \ float dot = a.x*b.x + a.y*b.y; #if (__COMPUTE_CAPABILITY__ < 200) #define IMAG_DOT_FLOAT4(dot, a, b) \ float dot = a.x*b.y - a.y*b.x + a.z*b.w - a.w*b.z; #else #define IMAG_DOT_FLOAT4(dot, a, b) \ float dot = fmaf(a.x, b.y, 0.0f); \ dot = fmaf(-a.y, b.x, dot); \ dot = fmaf(a.z, b.w, dot); \ dot = fmaf(-a.w, b.z, dot) #endif #define IMAG_DOT_FLOAT2(dot, a, b) \ float dot = a.x*b.y - a.y*b.x; #define AX_FLOAT4(a, X) \ X.x *= a; X.y *= a; X.z *= a; X.w *= a; #define AX_FLOAT2(a, X) \ X.x *= a; X.y *= a; #define XPY_FLOAT4(X, Y) \ Y.x += X.x; Y.y += X.y; Y.z += X.z; Y.w += X.w; #define XPY_FLOAT2(X, Y) \ Y.x += X.x; Y.y += X.y; #define XMY_FLOAT4(X, Y) \ Y.x = X.x - Y.x; Y.y = X.y - Y.y; Y.z = X.z - Y.z; Y.w = X.w - Y.w; #define XMY_FLOAT2(X, Y) \ Y.x = X.x - Y.x; Y.y = X.y - Y.y; #define MXPY_FLOAT4(X, Y) \ Y.x -= X.x; Y.y -= X.y; Y.z -= X.z; Y.w -= X.w; #define MXPY_FLOAT2(X, Y) \ Y.x -= X.x; Y.y -= X.y; #if (__COMPUTE_CAPABILITY__ < 200) #define AXPY_FLOAT4(a, X, Y) \ Y.x += a*X.x; Y.y += a*X.y; \ Y.z += a*X.z; Y.w += a*X.w; #else #define AXPY_FLOAT4(a, X, Y) \ Y.x = fmaf(a, X.x, Y.x); Y.y = fmaf(a, X.y, Y.y); \ Y.z = fmaf(a, X.z, Y.z); Y.w = fmaf(a, X.w, Y.w); #endif #define AXPY_FLOAT2(a, X, Y) \ Y.x += a*X.x; Y.y += a*X.y; #define AXPBY_FLOAT4(a, X, b, Y) \ Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \ Y.z = b*Y.z; Y.z += a*X.z; Y.w = b*Y.w; Y.w += a*X.w; #define AXPBY_FLOAT2(a, X, b, Y) \ Y.x = b*Y.x; Y.x += a*X.x; Y.y = b*Y.y; Y.y += a*X.y; \ #if (__COMPUTE_CAPABILITY__ < 200) #define XPAY_FLOAT4(X, a, Y) \ Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y; \ Y.z = X.z + a*Y.z; Y.w = X.w + a*Y.w; #else 
#define XPAY_FLOAT4(X, a, Y) \ Y.x = fmaf(a, Y.x, X.x); Y.y = fmaf(a, Y.y, X.y); \ Y.z = fmaf(a, Y.z, X.z); Y.w = fmaf(a, Y.w, X.w); #endif #define XPAY_FLOAT2(X, a, Y) \ Y.x = X.x + a*Y.x; Y.y = X.y + a*Y.y; #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_FLOAT4(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; \ Y.z += a.x*X.z; Y.z -= a.y*X.w; \ Y.w += a.y*X.z; Y.w += a.x*X.w; #else #define CAXPY_FLOAT4(a, X, Y) \ Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \ Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); \ Y.z = fmaf(a.x, X.z, Y.z); Y.z = fmaf(-a.y, X.w, Y.z); \ Y.w = fmaf(a.y, X.z, Y.w); Y.w = fmaf( a.x, X.w, Y.w); #endif // (__COMPUTE_CAPABILITY__ < 200) #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_FLOAT2(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; #else #define CAXPY_FLOAT2(a, X, Y) \ Y.x = fmaf(a.x, X.x, Y.x); Y.x = fmaf(-a.y, X.y, Y.x); \ Y.y = fmaf(a.y, X.x, Y.y); Y.y = fmaf( a.x, X.y, Y.y); #endif // (__COMPUTE_CAPABILITY__ < 200) #define CAXPY_DOUBLE2(a, X, Y) \ Y.x += a.x*X.x; Y.x -= a.y*X.y; \ Y.y += a.y*X.x; Y.y += a.x*X.y; \ #define CMAXPY_FLOAT4(a, X, Y) \ Y.x -= a.x*X.x; Y.x += a.y*X.y; \ Y.y -= a.y*X.x; Y.y -= a.x*X.y; \ Y.z -= a.x*X.z; Y.z += a.y*X.w; \ Y.w -= a.y*X.z; Y.w -= a.x*X.w; #define CMAXPY_FLOAT2(a, X, Y) \ Y.x -= a.x*X.x; Y.x += a.y*X.y; \ Y.y -= a.y*X.x; Y.y -= a.x*X.y; #define CAXPBY_FLOAT4(a, X, b, Y) \ { float2 y; \ y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \ y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \ Y.x = y.x; Y.y = y.y; \ y.x = a.x*X.z; y.x -= a.y*X.w; y.x += b.x*Y.z; y.x -= b.y*Y.w; \ y.y = a.y*X.z; y.y += a.x*X.w; y.y += b.y*Y.z; y.y += b.x*Y.w; \ Y.z = y.x; Y.w = y.y;} #define CAXPBY_FLOAT2(a, X, b, Y) \ { float2 y; \ y.x = a.x*X.x; y.x -= a.y*X.y; y.x += b.x*Y.x; y.x -= b.y*Y.y; \ y.y = a.y*X.x; y.y += a.x*X.y; y.y += b.y*Y.x; y.y += b.x*Y.y; \ Y.x = y.x; Y.y = y.y;} #define CXPAYPBZ_FLOAT4(X, a, Y, b, Z) \ {float2 z; \ z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \ z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \ Z.x = z.x; Z.y = z.y; \ z.x = X.z + a.x*Y.z; z.x -= a.y*Y.w; z.x += b.x*Z.z; z.x -= b.y*Z.w; \ z.y = X.w + a.y*Y.z; z.y += a.x*Y.w; z.y += b.y*Z.z; z.y += b.x*Z.w; \ Z.z = z.x; Z.w = z.y;} #define CXPAYPBZ_FLOAT2(X, a, Y, b, Z) \ {float2 z; \ z.x = X.x + a.x*Y.x; z.x -= a.y*Y.y; z.x += b.x*Z.x; z.x -= b.y*Z.y; \ z.y = X.y + a.y*Y.x; z.y += a.x*Y.y; z.y += b.y*Z.x; z.y += b.x*Z.y; \ Z.x = z.x; Z.y = z.y;} #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \ Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \ Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; \ Z.z += a.x*X.z - a.y*X.w + b.x*Y.z - b.y*Y.w; \ Z.w += a.y*X.z + a.x*X.w + b.y*Y.z + b.x*Y.w; #else #define CAXPBYPZ_FLOAT4(a, X, b, Y, Z) \ Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \ Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); \ Z.z = fmaf(a.x, X.z, Z.z); Z.z = fmaf(-a.y, X.w, Z.z); Z.z = fmaf(b.x, Y.z, Z.z); Z.z = fmaf(-b.y, Y.w, Z.z); \ Z.w = fmaf(a.y, X.z, Z.w); Z.w = fmaf( a.x, X.w, Z.w); Z.w = fmaf(b.y, Y.z, Z.w); Z.w = fmaf( b.x, Y.w, Z.w); #endif // (__COMPUTE_CAPABILITY__ < 200) #if (__COMPUTE_CAPABILITY__ < 200) #define CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \ Z.x += a.x*X.x - a.y*X.y + b.x*Y.x - b.y*Y.y; \ Z.y += a.y*X.x + a.x*X.y + b.y*Y.x + b.x*Y.y; #else #define 
CAXPBYPZ_FLOAT2(a, X, b, Y, Z) \ Z.x = fmaf(a.x, X.x, Z.x); Z.x = fmaf(-a.y, X.y, Z.x); Z.x = fmaf(b.x, Y.x, Z.x); Z.x = fmaf(-b.y, Y.y, Z.x); \ Z.y = fmaf(a.y, X.x, Z.y); Z.y = fmaf( a.x, X.y, Z.y); Z.y = fmaf(b.y, Y.x, Z.y); Z.y = fmaf( b.x, Y.y, Z.y); #endif // (__COMPUTE_CAPABILITY__ < 200) // Double precision input spinor field texture<int4, 1> xTexDouble2; texture<int4, 1> yTexDouble2; texture<int4, 1> zTexDouble2; texture<int4, 1> wTexDouble2; texture<int4, 1> uTexDouble2; // Single precision input spinor field texture<float2, 1> xTexSingle2; texture<float2, 1> yTexSingle2; texture<float4, 1> xTexSingle4; // Half precision input spinor field texture<short4, 1, cudaReadModeNormalizedFloat> texHalf1; texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt1; texture<float, 1, cudaReadModeElementType> texNorm1; // Half precision input spinor field texture<short4, 1, cudaReadModeNormalizedFloat> texHalf2; texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt2; texture<float, 1, cudaReadModeElementType> texNorm2; // Half precision input spinor field texture<short4, 1, cudaReadModeNormalizedFloat> texHalf3; texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt3; texture<float, 1, cudaReadModeElementType> texNorm3; // Half precision input spinor field texture<short4, 1, cudaReadModeNormalizedFloat> texHalf4; texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt4; texture<float, 1, cudaReadModeElementType> texNorm4; // Half precision input spinor field texture<short4, 1, cudaReadModeNormalizedFloat> texHalf5; texture<short2, 1, cudaReadModeNormalizedFloat> texHalfSt5; texture<float, 1, cudaReadModeElementType> texNorm5; void bindTexture(const cudaColorSpinorField *x, const cudaColorSpinorField *y=0, const cudaColorSpinorField *z=0, const cudaColorSpinorField *w=0, const cudaColorSpinorField *u=0) { QudaPrecision precision = x->Precision(); size_t spinor_bytes = x->Bytes() * precision; size_t norm_bytes = x->NormBytes(); if (precision == QUDA_DOUBLE_PRECISION) { cudaBindTexture(0, xTexDouble2, x->V(), spinor_bytes); if (y) cudaBindTexture(0, yTexDouble2, y->V(), spinor_bytes); if (z) cudaBindTexture(0, zTexDouble2, z->V(), spinor_bytes); if (w) cudaBindTexture(0, wTexDouble2, w->V(), spinor_bytes); if (u) cudaBindTexture(0, uTexDouble2, u->V(), spinor_bytes); } else if (precision == QUDA_SINGLE_PRECISION) { if (x->Nspin() == 4) { cudaBindTexture(0, xTexSingle4, x->V(), spinor_bytes); } else if (x->Nspin() == 1) { cudaBindTexture(0, xTexSingle2, x->V(), spinor_bytes); if (y) cudaBindTexture(0, yTexSingle2, y->V(), spinor_bytes); } else { errorQuda("Number of spins undefined"); } } else if (precision == QUDA_HALF_PRECISION) { if (x->Nspin() == 4){ //wilson cudaBindTexture(0, texHalf1, x->V(), spinor_bytes); cudaBindTexture(0, texNorm1, x->Norm(), norm_bytes); if (y) { cudaBindTexture(0, texHalf2, y->V(), spinor_bytes); cudaBindTexture(0, texNorm2, y->Norm(), norm_bytes); } if (z) { cudaBindTexture(0, texHalf3, z->V(), spinor_bytes); cudaBindTexture(0, texNorm3, z->Norm(), norm_bytes); } if (w) { cudaBindTexture(0, texHalf4, w->V(), spinor_bytes); cudaBindTexture(0, texNorm4, w->Norm(), norm_bytes); } if (u) { cudaBindTexture(0, texHalf5, u->V(), spinor_bytes); cudaBindTexture(0, texNorm5, u->Norm(), norm_bytes); } } else if (x->Nspin() == 1){ //staggered cudaBindTexture(0, texHalfSt1, x->V(), spinor_bytes); cudaBindTexture(0, texNorm1, x->Norm(), norm_bytes); if (y) { cudaBindTexture(0, texHalfSt2, y->V(), spinor_bytes); cudaBindTexture(0, texNorm2, y->Norm(), norm_bytes); } if 
(z) { cudaBindTexture(0, texHalfSt3, z->V(), spinor_bytes); cudaBindTexture(0, texNorm3, z->Norm(), norm_bytes); } if (w) { cudaBindTexture(0, texHalfSt4, w->V(), spinor_bytes); cudaBindTexture(0, texNorm4, w->Norm(), norm_bytes); } if (u) { cudaBindTexture(0, texHalfSt5, u->V(), spinor_bytes); cudaBindTexture(0, texNorm5, u->Norm(), norm_bytes); } } else { errorQuda("Number of spins undefined"); } } else { errorQuda("Precision undefined"); } } #define checkSpinor(a, b) \ { \ if (a.Precision() != b.Precision()) \ errorQuda("precisions do not match: %d %d", a.Precision(), b.Precision()); \ if (a.Length() != b.Length()) \ errorQuda("lengths do not match: %d %d", a.Length(), b.Length()); \ if (a.Stride() != b.Stride()) \ errorQuda("strides do not match: %d %d", a.Stride(), b.Stride()); \ } // For kernels with precision conversion built in #define checkSpinorLength(a, b) \ { \ if (a.Length() != b.Length()) { \ errorQuda("engths do not match: %d %d", a.Length(), b.Length()); \ } __global__ void convertDSKernel(double2 *dst, float4 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<6; k++) { dst[2*k*length+i].x = src[k*length+i].x; dst[2*k*length+i].y = src[k*length+i].y; dst[(2*k+1)*length+i].x = src[k*length+i].z; dst[(2*k+1)*length+i].y = src[k*length+i].w; } i += gridSize; } } __global__ void convertDSKernel(double2 *dst, float2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<3; k++) { dst[k*length+i].x = src[k*length+i].x; dst[k*length+i].y = src[k*length+i].y; } i += gridSize; } } __global__ void convertSDKernel(float4 *dst, double2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<6; k++) { dst[k*length+i].x = src[2*k*length+i].x; dst[k*length+i].y = src[2*k*length+i].y; dst[k*length+i].z = src[(2*k+1)*length+i].x; dst[k*length+i].w = src[(2*k+1)*length+i].y; } i += gridSize; } } __global__ void convertSDKernel(float2 *dst, double2 *src, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { for (int k=0; k<3; k++) { dst[k*length+i].x = src[k*length+i].x; dst[k*length+i].y = src[k*length+i].y; } i += gridSize; } } __global__ void convertHSKernel(short4 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { float4 F0 = tex1Dfetch(xTexSingle4, i + 0*length); float4 F1 = tex1Dfetch(xTexSingle4, i + 1*length); float4 F2 = tex1Dfetch(xTexSingle4, i + 2*length); float4 F3 = tex1Dfetch(xTexSingle4, i + 3*length); float4 F4 = tex1Dfetch(xTexSingle4, i + 4*length); float4 F5 = tex1Dfetch(xTexSingle4, i + 5*length); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(h, norm, F, length); i += gridSize; } } __global__ void convertHSKernel(short2 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { float2 F0 = tex1Dfetch(xTexSingle2, i + 0*length); float2 F1 = tex1Dfetch(xTexSingle2, i + 1*length); float2 F2 = tex1Dfetch(xTexSingle2, i + 2*length); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(h, norm, F, length); i += gridSize; } } __global__ void convertSHKernel(float4 *res, int length, int real_length) 
{ int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i<real_length) { RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length); res[0*length+i] = I0; res[1*length+i] = I1; res[2*length+i] = I2; res[3*length+i] = I3; res[4*length+i] = I4; res[5*length+i] = I5; i += gridSize; } } __global__ void convertSHKernel(float2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i<real_length) { RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length); res[0*length+i] = I0; res[1*length+i] = I1; res[2*length+i] = I2; i += gridSize; } } __global__ void convertHDKernel(short4 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { double2 F0 = fetch_double2(xTexDouble2, i+0*length); double2 F1 = fetch_double2(xTexDouble2, i+1*length); double2 F2 = fetch_double2(xTexDouble2, i+2*length); double2 F3 = fetch_double2(xTexDouble2, i+3*length); double2 F4 = fetch_double2(xTexDouble2, i+4*length); double2 F5 = fetch_double2(xTexDouble2, i+5*length); double2 F6 = fetch_double2(xTexDouble2, i+6*length); double2 F7 = fetch_double2(xTexDouble2, i+7*length); double2 F8 = fetch_double2(xTexDouble2, i+8*length); double2 F9 = fetch_double2(xTexDouble2, i+9*length); double2 F10 = fetch_double2(xTexDouble2, i+10*length); double2 F11 = fetch_double2(xTexDouble2, i+11*length); CONSTRUCT_HALF_SPINOR_FROM_DOUBLE(h, norm, F, length); i += gridSize; } } __global__ void convertHDKernel(short2 *h, float *norm, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { double2 F0 = fetch_double2(xTexDouble2, i+0*length); double2 F1 = fetch_double2(xTexDouble2, i+1*length); double2 F2 = fetch_double2(xTexDouble2, i+2*length); CONSTRUCT_HALF_SPINOR_FROM_DOUBLE_ST(h, norm, F, length); i += gridSize; } } __global__ void convertDHKernel(double2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { RECONSTRUCT_HALF_SPINOR(I, texHalf1, texNorm1, length); res[0*length+i] = make_double2(I0.x, I0.y); res[1*length+i] = make_double2(I0.z, I0.w); res[2*length+i] = make_double2(I1.x, I1.y); res[3*length+i] = make_double2(I1.z, I1.w); res[4*length+i] = make_double2(I2.x, I2.y); res[5*length+i] = make_double2(I2.z, I2.w); res[6*length+i] = make_double2(I3.x, I3.y); res[7*length+i] = make_double2(I3.z, I3.w); res[8*length+i] = make_double2(I4.x, I4.y); res[9*length+i] = make_double2(I4.z, I4.w); res[10*length+i] = make_double2(I5.x, I5.y); res[11*length+i] = make_double2(I5.z, I5.w); i += gridSize; } } __global__ void convertDHKernelSt(double2 *res, int length, int real_length) { int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while(i < real_length) { RECONSTRUCT_HALF_SPINOR_ST(I, texHalfSt1, texNorm1, length); res[0*length+i] = make_double2(I0.x, I0.y); res[1*length+i] = make_double2(I1.x, I1.y); res[2*length+i] = make_double2(I2.x, I2.y); i += gridSize; } } void copyCuda(cudaColorSpinorField &dst, const cudaColorSpinorField &src) { if (&src == &dst) return; // aliasing fields if (src.Nspin() != 1 && src.Nspin() != 4){ errorQuda("nSpin(%d) not supported in function %s, line %d\n", src.Nspin(), __FUNCTION__, __LINE__); } if ((dst.Precision() == QUDA_HALF_PRECISION || 
src.Precision() == QUDA_HALF_PRECISION) && (dst.SiteSubset() == QUDA_FULL_SITE_SUBSET || src.SiteSubset() == QUDA_FULL_SITE_SUBSET)) { copyCuda(dst.Even(), src.Even()); copyCuda(dst.Odd(), src.Odd()); return; } // For a given dst precision, there are two non-trivial possibilities for the // src precision. The higher one corresponds to kernel index 0 (in the table // of block and grid dimensions), while the lower one corresponds to index 1. int id; if (src.Precision() == QUDA_DOUBLE_PRECISION || dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { id = 0; } else { id = 1; } setBlock(id, dst.Stride(), dst.Precision()); quda::blas_bytes += src.RealLength()*((int)src.Precision() + (int)dst.Precision()); if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { if (src.Nspin() == 4){ convertDSKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), (float4*)src.V(), src.Stride()); }else{ //src.Nspin() == 1 convertDSKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), (float2*)src.V(), src.Stride()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) { if (src.Nspin() == 4){ convertSDKernel<<<blasGrid, blasBlock>>>((float4*)dst.V(), (double2*)src.V(), src.Stride()); }else{ //src.Nspin() ==1 convertSDKernel<<<blasGrid, blasBlock>>>((float2*)dst.V(), (double2*)src.V(), src.Stride()); } } else if (dst.Precision() == QUDA_SINGLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) { quda::blas_bytes += src.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ convertSHKernel<<<blasGrid, blasBlock>>>((float4*)dst.V(), src.Stride(), src.Volume()); cudaUnbindTexture(texHalf1); cudaUnbindTexture(texNorm1); }else{ //nSpin== 1; convertSHKernel<<<blasGrid, blasBlock>>>((float2*)dst.V(), src.Stride(), src.Volume()); cudaUnbindTexture(texHalfSt1); cudaUnbindTexture(texNorm1); } } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_SINGLE_PRECISION) { quda::blas_bytes += dst.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ convertHSKernel<<<blasGrid, blasBlock>>>((short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); cudaUnbindTexture(xTexSingle4); }else{ //nSpin == 1 convertHSKernel<<<blasGrid, blasBlock>>>((short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); cudaUnbindTexture(xTexSingle2); } } else if (dst.Precision() == QUDA_DOUBLE_PRECISION && src.Precision() == QUDA_HALF_PRECISION) { quda::blas_bytes += src.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ convertDHKernel<<<blasGrid, blasBlock>>>((double2*)dst.V(), src.Stride(), src.Volume()); cudaUnbindTexture(texHalf1); cudaUnbindTexture(texNorm1); }else{//nSpinr == 1 convertDHKernelSt<<<blasGrid, blasBlock>>>((double2*)dst.V(), src.Stride(), src.Volume()); cudaUnbindTexture(texHalfSt1); cudaUnbindTexture(texNorm1); } } else if (dst.Precision() == QUDA_HALF_PRECISION && src.Precision() == QUDA_DOUBLE_PRECISION) { quda::blas_bytes += dst.Volume()*sizeof(float); bindTexture(&src); if (src.Nspin() == 4){ convertHDKernel<<<blasGrid, blasBlock>>>((short4*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); }else{ //nSpinr == 1 convertHDKernel<<<blasGrid, blasBlock>>>((short2*)dst.V(), (float*)dst.Norm(), src.Stride(), src.Volume()); } cudaUnbindTexture(xTexDouble2); } else { cudaMemcpy(dst.V(), src.V(), dst.Bytes(), cudaMemcpyDeviceToDevice); if (dst.Precision() == QUDA_HALF_PRECISION) { cudaMemcpy(dst.Norm(), src.Norm(), 
dst.Bytes()/(dst.Ncolor()*dst.Nspin()), cudaMemcpyDeviceToDevice); quda::blas_bytes += 2*dst.RealLength()*sizeof(float); } } if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpbyKernel(Float a, Float2 *x, Float b, Float2 *y, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { y[i] = a*x[i] + b*y[i]; i += gridSize; } } __global__ void axpbyHKernel(float a, float b, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPBY_FLOAT4(a, x0, b, y0); AXPBY_FLOAT4(a, x1, b, y1); AXPBY_FLOAT4(a, x2, b, y2); AXPBY_FLOAT4(a, x3, b, y3); AXPBY_FLOAT4(a, x4, b, y4); AXPBY_FLOAT4(a, x5, b, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void axpbyHKernel(float a, float b, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AXPBY_FLOAT2(a, x0, b, y0); AXPBY_FLOAT2(a, x1, b, y1); AXPBY_FLOAT2(a, x2, b, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = a*x[i] + b*y[i] void axpbyCuda(const double &a, cudaColorSpinorField &x, const double &b, cudaColorSpinorField &y) { setBlock(2, x.Length(), x.Precision()); checkSpinor(x, y); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpbyCuda(a, x.Even(), b, y.Even()); axpbyCuda(a, x.Odd(), b, y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { axpbyKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), b, (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { axpbyKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float)b, (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson axpbyHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1) {//staggered axpbyHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 3*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float> __global__ void xpyKernel(Float *x, Float *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] += x[i]; i += gridSize; } } __global__ void xpyHKernel(short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); XPY_FLOAT4(x0, y0); XPY_FLOAT4(x1, y1); XPY_FLOAT4(x2, y2); XPY_FLOAT4(x3, y3); XPY_FLOAT4(x4, y4); XPY_FLOAT4(x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void xpyHKernel(short2 *yH, float *yN, int stride, int length) { 
unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); XPY_FLOAT2(x0, y0); XPY_FLOAT2(x1, y1); XPY_FLOAT2(x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = x[i] + y[i] void xpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(3, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { xpyCuda(x.Even(), y.Even()); xpyCuda(x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { xpyKernel<<<blasGrid, blasBlock>>>((double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { xpyKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson xpyHKernel<<<blasGrid, blasBlock>>>((short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered xpyHKernel<<<blasGrid, blasBlock>>>((short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyKernel(Float a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] += a*x[i]; i += gridSize; } } __global__ void axpyHKernel(float a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPY_FLOAT4(a, x0, y0); AXPY_FLOAT4(a, x1, y1); AXPY_FLOAT4(a, x2, y2); AXPY_FLOAT4(a, x3, y3); AXPY_FLOAT4(a, x4, y4); AXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void axpyHKernel(float a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AXPY_FLOAT2(a, x0, y0); AXPY_FLOAT2(a, x1, y1); AXPY_FLOAT2(a, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = a*x[i] + y[i] void axpyCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(4, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpyCuda(a, x.Even(), y.Even()); axpyCuda(a, x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { axpyKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { axpyKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson axpyHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered axpyHKernel<<<blasGrid, 
blasBlock>>>((float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 2*x.RealLength(); if (!blasTuning) checkCudaError(); // blas_quda may require new blas_param from blas_test } template <typename Float, typename Float2> __global__ void xpayKernel(const Float2 *x, Float a, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] = x[i] + a*y[i]; i += gridSize; } } __global__ void xpayHKernel(float a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); XPAY_FLOAT4(x0, a, y0); XPAY_FLOAT4(x1, a, y1); XPAY_FLOAT4(x2, a, y2); XPAY_FLOAT4(x3, a, y3); XPAY_FLOAT4(x4, a, y4); XPAY_FLOAT4(x5, a, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void xpayHKernel(float a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); XPAY_FLOAT2(x0, a, y0); XPAY_FLOAT2(x1, a, y1); XPAY_FLOAT2(x2, a, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = x[i] + a*y[i] void xpayCuda(const cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(5, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { xpayCuda(x.Even(), a, y.Even()); xpayCuda(x.Odd(), a, y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { xpayKernel<<<blasGrid, blasBlock>>>((double*)x.V(), a, (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { xpayKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float)a, (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson xpayHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() ==1){ //staggered xpayHKernel<<<blasGrid, blasBlock>>>((float)a, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 2*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float> __global__ void mxpyKernel(Float *x, Float *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { y[i] -= x[i]; i += gridSize; } } __global__ void mxpyHKernel(short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); MXPY_FLOAT4(x0, y0); MXPY_FLOAT4(x1, y1); MXPY_FLOAT4(x2, y2); MXPY_FLOAT4(x3, y3); MXPY_FLOAT4(x4, y4); MXPY_FLOAT4(x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); 
i += gridSize; } } __global__ void mxpyHKernel(short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); MXPY_FLOAT2(x0, y0); MXPY_FLOAT2(x1, y1); MXPY_FLOAT2(x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] -= x[i] (minus x plus y) void mxpyCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); setBlock(6, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { mxpyCuda(x.Even(), y.Even()); mxpyCuda(x.Odd(), y.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { mxpyKernel<<<blasGrid, blasBlock>>>((double*)x.V(), (double*)y.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { mxpyKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), (float2*)y.V(), x.Length()/2); } else { bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson mxpyHKernel<<<blasGrid, blasBlock>>>((short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1) { //staggered mxpyHKernel<<<blasGrid, blasBlock>>>((short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axKernel(Float a, Float2 *x, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { x[i] *= a; i += gridSize; } } __global__ void axHKernel(float a, short4 *xH, float *xN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2); AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axHKernel(float a, short2 *xH, float *xN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operation x[i] = a*x[i] void axCuda(const double &a, cudaColorSpinorField &x) { setBlock(7, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axCuda(a, x.Even()); axCuda(a, x.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { axKernel<<<blasGrid, blasBlock>>>(a, (double*)x.V(), x.Length()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { axKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), x.Length()/2); } else { bindTexture(&x); if (x.Nspin() == 4){ //wilson axHKernel<<<blasGrid, blasBlock>>>((float)a, (short4*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume()); }else if (x.Nspin() ==1){ //staggered axHKernel<<<blasGrid, blasBlock>>>((float)a, (short2*)x.V(), (float*)x.Norm(), x.Stride(), x.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 
2*x.Volume()*sizeof(float); } quda::blas_bytes += 2*x.RealLength()*x.Precision(); quda::blas_flops += x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpyDKernel(Float2 a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z = READ_DOUBLE2_TEXTURE(x, i); y[i].x += a.x*Z.x - a.y*Z.y; y[i].y += a.y*Z.x + a.x*Z.y; i += gridSize; } } template <typename Float2> __global__ void caxpySKernel(Float2 a, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z = read_Float2(x, i); y[i].x += a.x*Z.x - a.y*Z.y; y[i].y += a.y*Z.x + a.x*Z.y; i += gridSize; } } __global__ void caxpyHKernel(float2 a, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); CAXPY_FLOAT4(a, x0, y0); CAXPY_FLOAT4(a, x1, y1); CAXPY_FLOAT4(a, x2, y2); CAXPY_FLOAT4(a, x3, y3); CAXPY_FLOAT4(a, x4, y4); CAXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void caxpyHKernel(float2 a, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); CAXPY_FLOAT2(a, x0, y0); CAXPY_FLOAT2(a, x1, y1); CAXPY_FLOAT2(a, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] += a*x[i] void caxpyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); int length = x.Length()/2; setBlock(8, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpyCuda(a, x.Even(), y.Even()); caxpyCuda(a, x.Odd(), y.Odd()); return; } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 4*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y); double2 a2 = make_double2(real(a), imag(a)); caxpyDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), (double2*)y.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); caxpySKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), (float2*)y.V(), length); } else { bindTexture(&x, &y); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson caxpyHKernel<<<blasGrid, blasBlock>>>(a2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); } else if (x.Nspin() == 1){ //staggered caxpyHKernel<<<blasGrid, blasBlock>>>(a2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); } else { errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpbyDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z1 = READ_DOUBLE2_TEXTURE(x, i); Float2 Z2 = READ_DOUBLE2_TEXTURE(y, i); y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y; y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + 
b.x*Z2.y; i += gridSize; } } template <typename Float2> __global__ void caxpbySKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 Z1 = read_Float2(x, i); Float2 Z2 = read_Float2(y, i); y[i].x = a.x*Z1.x + b.x*Z2.x - a.y*Z1.y - b.y*Z2.y; y[i].y = a.y*Z1.x + b.y*Z2.x + a.x*Z1.y + b.x*Z2.y; i += gridSize; } } __global__ void caxpbyHKernel(float2 a, float2 b, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); CAXPBY_FLOAT4(a, x0, b, y0); CAXPBY_FLOAT4(a, x1, b, y1); CAXPBY_FLOAT4(a, x2, b, y2); CAXPBY_FLOAT4(a, x3, b, y3); CAXPBY_FLOAT4(a, x4, b, y4); CAXPBY_FLOAT4(a, x5, b, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void caxpbyHKernel(float2 a, float2 b, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); CAXPBY_FLOAT2(a, x0, b, y0); CAXPBY_FLOAT2(a, x1, b, y1); CAXPBY_FLOAT2(a, x2, b, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] = c*x[i] + b*y[i] void caxpbyCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y) { checkSpinor(x,y); int length = x.Length()/2; setBlock(9, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbyCuda(a, x.Even(), b, y.Even()); caxpbyCuda(a, x.Odd(), b, y.Odd()); return; } quda::blas_bytes += 3*x.RealLength()*x.Precision(); quda::blas_flops += 7*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y); caxpbyDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); caxpbySKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), length); } else { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); bindTexture(&x, &y); if (x.Nspin() == 4){ //wilson caxpbyHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else if (x.Nspin() == 1){ //staggered caxpbyHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 3*x.Volume()*sizeof(float); } if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void cxpaypbzDKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 T1 = READ_DOUBLE2_TEXTURE(x, i); Float2 T2 = READ_DOUBLE2_TEXTURE(y, i); Float2 T3 = read_Float2(z, i); T1.x += a.x*T2.x - a.y*T2.y; T1.y += a.y*T2.x + a.x*T2.y; T1.x += b.x*T3.x - b.y*T3.y; T1.y += b.y*T3.x + b.x*T3.y; z[i] = make_Float2(T1); i += gridSize; } } template 
<typename Float2> __global__ void cxpaypbzSKernel(Float2 *x, Float2 a, Float2 *y, Float2 b, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 T1 = read_Float2(x, i); Float2 T2 = read_Float2(y, i); Float2 T3 = read_Float2(z, i); T1.x += a.x*T2.x - a.y*T2.y; T1.y += a.y*T2.x + a.x*T2.y; T1.x += b.x*T3.x - b.y*T3.y; T1.y += b.y*T3.x + b.x*T3.y; z[i] = make_Float2(T1); i += gridSize; } } __global__ void cxpaypbzHKernel(float2 a, float2 b, short4 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CXPAYPBZ_FLOAT4(x0, a, y0, b, z0); CXPAYPBZ_FLOAT4(x1, a, y1, b, z1); CXPAYPBZ_FLOAT4(x2, a, y2, b, z2); CXPAYPBZ_FLOAT4(x3, a, y3, b, z3); CXPAYPBZ_FLOAT4(x4, a, y4, b, z4); CXPAYPBZ_FLOAT4(x5, a, y5, b, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); i += gridSize; } } __global__ void cxpaypbzHKernel(float2 a, float2 b, short2 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CXPAYPBZ_FLOAT2(x0, a, y0, b, z0); CXPAYPBZ_FLOAT2(x1, a, y1, b, z1); CXPAYPBZ_FLOAT2(x2, a, y2, b, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); i += gridSize; } } // performs the operation z[i] = x[i] + a*y[i] + b*z[i] void cxpaypbzCuda(cudaColorSpinorField &x, const quda::Complex &a, cudaColorSpinorField &y, const quda::Complex &b, cudaColorSpinorField &z) { checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; setBlock(10, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { cxpaypbzCuda(x.Even(), a, y.Even(), b, z.Even()); cxpaypbzCuda(x.Odd(), a, y.Odd(), b, z.Odd()); return; } quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y); cxpaypbzDKernel<<<blasGrid, blasBlock>>>((double2*)x.V(), a2, (double2*)y.V(), b2, (double2*)z.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); cxpaypbzSKernel<<<blasGrid, blasBlock>>>((float2*)x.V(), a2, (float2*)y.V(), b2, (float2*)z.V(), length); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() ==4 ){//wilson cxpaypbzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() ==1 ){//staggered cxpaypbzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyBzpcxDKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len) { unsigned int i = blockIdx.x*(blockDim.x) + 
threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = READ_DOUBLE2_TEXTURE(x, i); Float2 z_i = READ_DOUBLE2_TEXTURE(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = b*z_i.x + c*x_i.x; x[i].y = b*z_i.y + c*x_i.y; i += gridSize; } } template <typename Float, typename Float2> __global__ void axpyBzpcxSKernel(Float a, Float2 *x, Float2 *y, Float b, Float2 *z, Float c, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = read_Float2(x, i); Float2 z_i = read_Float2(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = b*z_i.x + c*x_i.x; x[i].y = b*z_i.y + c*x_i.y; i += gridSize; } } __global__ void axpyBzpcxHKernel(float a, float b, float c, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); AXPY_FLOAT4(a, x0, y0); AXPBY_FLOAT4(b, z0, c, x0); AXPY_FLOAT4(a, x1, y1); AXPBY_FLOAT4(b, z1, c, x1); AXPY_FLOAT4(a, x2, y2); AXPBY_FLOAT4(b, z2, c, x2); AXPY_FLOAT4(a, x3, y3); AXPBY_FLOAT4(b, z3, c, x3); AXPY_FLOAT4(a, x4, y4); AXPBY_FLOAT4(b, z4, c, x4); AXPY_FLOAT4(a, x5, y5); AXPBY_FLOAT4(b, z5, c, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axpyBzpcxHKernel(float a, float b, float c, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); AXPY_FLOAT2(a, x0, y0); AXPBY_FLOAT2(b, z0, c, x0); AXPY_FLOAT2(a, x1, y1); AXPBY_FLOAT2(b, z1, c, x1); AXPY_FLOAT2(a, x2, y2); AXPBY_FLOAT2(b, z2, c, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operations: {y[i] = a*x[i] + y[i]; x[i] = b*z[i] + c*x[i]} void axpyBzpcxCuda(const double &a, cudaColorSpinorField& x, cudaColorSpinorField& y, const double &b, cudaColorSpinorField& z, const double &c) { checkSpinor(x,y); checkSpinor(x,z); setBlock(11, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET){ axpyBzpcxCuda(a, x.Even(), y.Even(), b, z.Even(), c); axpyBzpcxCuda(a, x.Odd(), y.Odd(), b, z.Odd(), c); return ; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, 0, &z); axpyBzpcxDKernel<<<blasGrid, blasBlock>>>(a, (double2*)x.V(), (double2*)y.V(), b, (double2*)z.V(), c, x.Length()/2); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { axpyBzpcxSKernel<<<blasGrid, blasBlock>>>((float)a, (float2*)x.V(), (float2*)y.V(), (float)b, (float2*)z.V(), (float)c, x.Length()/2); } else { bindTexture(&x, &y, &z); if (x.Nspin() == 4){ //wilson axpyBzpcxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (float)c, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else if (x.Nspin() == 1){ //staggered axpyBzpcxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (float)c, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), 
z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 5*x.Volume()*sizeof(float); } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 10*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float, typename Float2> __global__ void axpyZpbxDKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = READ_DOUBLE2_TEXTURE(x, i); Float2 z_i = READ_DOUBLE2_TEXTURE(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = z_i.x + b*x_i.x; x[i].y = z_i.y + b*x_i.y; i += gridSize; } } template <typename Float, typename Float2> __global__ void axpyZpbxSKernel(Float a, Float2 *x, Float2 *y, Float2 *z, Float b, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 x_i = read_Float2(x, i); Float2 z_i = read_Float2(z, i); y[i].x += a*x_i.x; y[i].y += a*x_i.y; x[i].x = z_i.x + b*x_i.x; x[i].y = z_i.y + b*x_i.y; i += gridSize; } } __global__ void axpyZpbxHKernel(float a, float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AXPY_FLOAT4(a, x0, y0); AXPY_FLOAT4(a, x1, y1); AXPY_FLOAT4(a, x2, y2); AXPY_FLOAT4(a, x3, y3); AXPY_FLOAT4(a, x4, y4); AXPY_FLOAT4(a, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); XPAY_FLOAT4(z0, b, x0); XPAY_FLOAT4(z1, b, x1); XPAY_FLOAT4(z2, b, x2); XPAY_FLOAT4(z3, b, x3); XPAY_FLOAT4(z4, b, x4); XPAY_FLOAT4(z5, b, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); i += gridSize; } } __global__ void axpyZpbxHKernel(float a, float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); AXPY_FLOAT2(a, x0, y0); XPAY_FLOAT2(z0, b, x0); AXPY_FLOAT2(a, x1, y1); XPAY_FLOAT2(z1, b, x1); AXPY_FLOAT2(a, x2, y2); XPAY_FLOAT2(z2, b, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); i += gridSize; } } // performs the operations: {y[i] = a*x[i] + y[i]; x[i] = z[i] + b*x[i]} void axpyZpbxCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z, const double &b) { checkSpinor(x,y); checkSpinor(x,z); setBlock(12, x.Length(), x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { axpyZpbxCuda(a, x.Even(), y.Even(), z.Even(), b); axpyZpbxCuda(a, x.Odd(), y.Odd(), z.Odd(), b); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, 0, &z); axpyZpbxDKernel<<<blasGrid, blasBlock>>> (a, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), b, x.Length()/2); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { axpyZpbxSKernel<<<blasGrid, blasBlock>>> ((float)a, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), (float)b, x.Length()/2); } else { bindTexture(&x, &y, &z); int spinor_bytes = 
x.Length()*sizeof(short); if (x.Nspin() ==4){ //wilson axpyZpbxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else if (x.Nspin() == 1){ //staggered axpyZpbxHKernel<<<blasGrid, blasBlock>>>((float)a, (float)b, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 5*x.Volume()*sizeof(float); } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpbypzYmbwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = READ_DOUBLE2_TEXTURE(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = READ_DOUBLE2_TEXTURE(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); Float2 W = read_Float2(w, i); Y.x -= b.x*W.x - b.y*W.y; Y.y -= b.y*W.x + b.x*W.y; y[i] = make_Float2(Y); i += gridSize; } } template <typename Float2> __global__ void caxpbypzYmbwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = read_Float2(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = read_Float2(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); Float2 W = read_Float2(w, i); Y.x -= b.x*W.x - b.y*W.y; Y.y -= b.y*W.x + b.x*W.y; y[i] = make_Float2(Y); i += gridSize; } } __global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); READ_HALF_SPINOR(w, texHalf4, stride); float2 b2 = -wc*b; CAXPY_FLOAT4(b2, w0, y0); CAXPY_FLOAT4(b2, w1, y1); CAXPY_FLOAT4(b2, w2, y2); CAXPY_FLOAT4(b2, w3, y3); CAXPY_FLOAT4(b2, w4, y4); CAXPY_FLOAT4(b2, w5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void caxpbypzYmbwHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); READ_HALF_SPINOR_ST(w, texHalfSt4, stride); float2 b2 = -wc*b; CAXPY_FLOAT2(b2, 
w0, y0); CAXPY_FLOAT2(b2, w1, y1); CAXPY_FLOAT2(b2, w2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + z[i] and y[i] -= b*w[i] void caxpbypzYmbwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w) { checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); int length = x.Length()/2; setBlock(13, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypzYmbwCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even()); caxpbypzYmbwCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z); caxpbypzYmbwDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), (double2*)w.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); caxpbypzYmbwSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), (float2*)w.V(), length); } else { quda::blas_bytes += 6*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); bindTexture(&x, &y, &z, &w); if (x.Nspin() == 4){ //wilson caxpbypzYmbwHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered caxpbypzYmbwHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 6*x.RealLength()*x.Precision(); quda::blas_flops += 12*x.RealLength(); if (!blasTuning) checkCudaError(); } #if (__COMPUTE_CAPABILITY__ < 130) // Computes c = a + b in "double single" precision. __device__ void dsadd(volatile QudaSumFloat &c0, volatile QudaSumFloat &c1, const volatile QudaSumFloat &a0, const volatile QudaSumFloat &a1, const float b0, const float b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0 + b0; QudaSumFloat e = t1 - a0; QudaSumFloat t2 = ((b0 - e) + (a0 - (t1 - e))) + a1 + b1; // The result is t1 + t2, after normalization. c0 = e = t1 + t2; c1 = t2 - (e - t1); } // Computes c = a + b in "double single" precision (complex version) __device__ void zcadd(volatile QudaSumComplex &c0, volatile QudaSumComplex &c1, const volatile QudaSumComplex &a0, const volatile QudaSumComplex &a1, const volatile QudaSumComplex &b0, const volatile QudaSumComplex &b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0.x + b0.x; QudaSumFloat e = t1 - a0.x; QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x; // The result is t1 + t2, after normalization. c0.x = e = t1 + t2; c1.x = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.y + b0.y; e = t1 - a0.y; t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y; // The result is t1 + t2, after normalization. 
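// (Illustrative example: if t1 = 1.0f and t2 = 1e-8f, the rounded sum stays 1.0f,
//  so the residual 1e-8f survives in the low word instead of being lost.)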
c0.y = e = t1 + t2; c1.y = t2 - (e - t1); } // Computes c = a + b in "double single" precision (float3 version) __device__ void dsadd3(volatile QudaSumFloat3 &c0, volatile QudaSumFloat3 &c1, const volatile QudaSumFloat3 &a0, const volatile QudaSumFloat3 &a1, const volatile QudaSumFloat3 &b0, const volatile QudaSumFloat3 &b1) { // Compute dsa + dsb using Knuth's trick. QudaSumFloat t1 = a0.x + b0.x; QudaSumFloat e = t1 - a0.x; QudaSumFloat t2 = ((b0.x - e) + (a0.x - (t1 - e))) + a1.x + b1.x; // The result is t1 + t2, after normalization. c0.x = e = t1 + t2; c1.x = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.y + b0.y; e = t1 - a0.y; t2 = ((b0.y - e) + (a0.y - (t1 - e))) + a1.y + b1.y; // The result is t1 + t2, after normalization. c0.y = e = t1 + t2; c1.y = t2 - (e - t1); // Compute dsa + dsb using Knuth's trick. t1 = a0.z + b0.z; e = t1 - a0.z; t2 = ((b0.z - e) + (a0.z - (t1 - e))) + a1.z + b1.z; // The result is t1 + t2, after normalization. c0.z = e = t1 + t2; c1.z = t2 - (e - t1); } #endif // // double normCuda(float *a, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normD##suffix #define REDUCE_TYPES Float *a #define REDUCE_PARAMS a #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i]*a[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normS##suffix #define REDUCE_TYPES Float *a #define REDUCE_PARAMS a #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION // // double normHCuda(char *, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normH##suffix #define REDUCE_TYPES Float *aN, int stride // dummy type #define REDUCE_PARAMS aN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ REAL_DOT_FLOAT4(norm0, a0, a0); \ REAL_DOT_FLOAT4(norm1, a1, a1); \ REAL_DOT_FLOAT4(norm2, a2, a2); \ REAL_DOT_FLOAT4(norm3, a3, a3); \ REAL_DOT_FLOAT4(norm4, a4, a4); \ REAL_DOT_FLOAT4(norm5, a5, a5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_OPERATION(i) (ac*ac*norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) normHSt##suffix #define REDUCE_TYPES Float *aN, int stride // dummy type #define REDUCE_PARAMS aN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ REAL_DOT_FLOAT2(norm0, a0, a0); \ REAL_DOT_FLOAT2(norm1, a1, a1); \ REAL_DOT_FLOAT2(norm2, a2, a2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_OPERATION(i) (ac*ac*norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double normCuda(const cudaColorSpinorField &a) { if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) return normCuda(a.Even()) + normCuda(a.Odd()); const int id = 14; quda::blas_flops += 2*a.RealLength(); quda::blas_bytes += a.RealLength()*a.Precision(); if (a.Precision() == QUDA_DOUBLE_PRECISION) { return normDCuda((double*)a.V(), a.Length(), id, a.Precision()); } else if 
(a.Precision() == QUDA_SINGLE_PRECISION) { return normSCuda((float2*)a.V(), a.Length()/2, id, a.Precision()); } else { bindTexture(&a); quda::blas_bytes += (a.RealLength()*a.Precision()) / (a.Ncolor() * a.Nspin()); if (a.Nspin() == 4){ //wilson return normHCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else if (a.Nspin() == 1) { //staggered return normHStCuda((float*)a.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin()); return 0; } } } // // double reDotProductFCuda(float *a, float *b, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductD##suffix #define REDUCE_TYPES Float *a, Float *b #define REDUCE_PARAMS a, b #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i]*b[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductS##suffix #define REDUCE_TYPES Float *a, Float *b #define REDUCE_PARAMS a, b #define REDUCE_AUXILIARY(i) #define REDUCE_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION // // double reDotProductHCuda(float *a, float *b, int n) {} // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductH##suffix #define REDUCE_TYPES Float *aN, Float *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ READ_HALF_SPINOR(b, texHalf2, stride); \ REAL_DOT_FLOAT4(rdot0, a0, b0); \ REAL_DOT_FLOAT4(rdot1, a1, b1); \ REAL_DOT_FLOAT4(rdot2, a2, b2); \ REAL_DOT_FLOAT4(rdot3, a3, b3); \ REAL_DOT_FLOAT4(rdot4, a4, b4); \ REAL_DOT_FLOAT4(rdot5, a5, b5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_OPERATION(i) (ac*bc*rdot0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) reDotProductHSt##suffix #define REDUCE_TYPES Float *aN, Float *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \ REAL_DOT_FLOAT2(rdot0, a0, b0); \ REAL_DOT_FLOAT2(rdot1, a1, b1); \ REAL_DOT_FLOAT2(rdot2, a2, b2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_OPERATION(i) (ac*bc*rdot0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double reDotProductCuda(cudaColorSpinorField &a, cudaColorSpinorField &b) { if (a.SiteSubset() == QUDA_FULL_SITE_SUBSET) { return reDotProductCuda(a.Even(), b.Even()) + reDotProductCuda(a.Odd(), b.Odd()); } const int id = 15; quda::blas_flops += 2*a.RealLength(); checkSpinor(a, b); quda::blas_bytes += 2*a.RealLength()*a.Precision(); if (a.Precision() == QUDA_DOUBLE_PRECISION) { return reDotProductDCuda((double*)a.V(), (double*)b.V(), a.Length(), id, a.Precision()); } else if (a.Precision() == QUDA_SINGLE_PRECISION) { return reDotProductSCuda((float2*)a.V(), (float2*)b.V(), a.Length()/2, id, a.Precision()); } else { quda::blas_bytes += 2*a.Volume()*sizeof(float); 
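    // Half precision: the kernels read normalized shorts through the bound textures
    // and rescale each partial dot product by the per-site norm factors (ac*bc).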
bindTexture(&a, &b); if (a.Nspin() == 4){ //wilson return reDotProductHCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else if (a.Nspin() == 1){ //staggered return reDotProductHStCuda((float*)a.Norm(), (float*)b.Norm(), a.Stride(), a.Volume(), id, a.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, a.Nspin()); return 0; } } } // // double axpyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y #define REDUCE_PARAMS a, x, y #define REDUCE_AUXILIARY(i) y[i] = a*x[i] + y[i] #define REDUCE_OPERATION(i) (y[i]*y[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix #define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ AXPY_FLOAT4(a, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ AXPY_FLOAT4(a, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ AXPY_FLOAT4(a, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ AXPY_FLOAT4(a, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ AXPY_FLOAT4(a, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ AXPY_FLOAT4(a, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) axpyNormH##suffix #define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ AXPY_FLOAT2(a, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ AXPY_FLOAT2(a, x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ AXPY_FLOAT2(a, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double axpyNormCuda(const double &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return axpyNormCuda(a, x.Even(), y.Even()) + axpyNormCuda(a, x.Odd(), y.Odd()); const int id = 16; quda::blas_flops += 4*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 3*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { return axpyNormFCuda(a, (double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return axpyNormFCuda((float)a, (float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return 
axpyNormHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return axpyNormHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double xmyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = x[i] - y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) xmyNormF##suffix #define REDUCE_TYPES Float *x, Float *y #define REDUCE_PARAMS x, y #define REDUCE_AUXILIARY(i) y[i] = x[i] - y[i] #define REDUCE_OPERATION(i) (y[i]*y[i]) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix #define REDUCE_TYPES Float *d1, Float *d2, short4 *yH, float *yN, int stride #define REDUCE_PARAMS d1, d2, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ XMY_FLOAT4(x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ XMY_FLOAT4(x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ XMY_FLOAT4(x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ XMY_FLOAT4(x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ XMY_FLOAT4(x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ XMY_FLOAT4(x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) xmyNormH##suffix #define REDUCE_TYPES Float *d1, Float *d2, short2 *yH, float *yN, int stride #define REDUCE_PARAMS d1, d2, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ XMY_FLOAT2(x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ XMY_FLOAT2(x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ XMY_FLOAT2(x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double xmyNormCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return xmyNormCuda(x.Even(), y.Even()) + xmyNormCuda(x.Odd(), y.Odd()); const int id = 17; quda::blas_flops += 3*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 3*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { return xmyNormFCuda((double*)x.V(), (double*)y.V(), x.Length(), id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return xmyNormFCuda((float*)x.V(), (float*)y.V(), x.Length(), id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); if (x.Nspin() ==4 ){ //wilsin return xmyNormHCuda((char*)0, (char*)0, (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision()); }else if (x.Nspin() == 
1){ return xmyNormHCuda((char*)0, (char*)0, (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } // // double2 cDotProductCuda(float2 *x, float2 *y, int n) {} // template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductD##suffix #define REDUCE_TYPES Float2 *x, Float2 *y, Float c #define REDUCE_PARAMS x, y, c #define REDUCE_REAL_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i); #define REDUCE_IMAG_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i); #define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductS##suffix #define REDUCE_TYPES Float2 *x, Float2 *y, Float c #define REDUCE_PARAMS x, y, c #define REDUCE_REAL_AUXILIARY(i) Float2 a = read_Float2(x, i); #define REDUCE_IMAG_AUXILIARY(i) Float2 b = read_Float2(y, i); #define REDUCE_REAL_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_IMAG_OPERATION(i) (a.x*b.y - a.y*b.x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductH##suffix #define REDUCE_TYPES Float *aN, Float2 *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_REAL_AUXILIARY(i) \ READ_HALF_SPINOR(a, texHalf1, stride); \ READ_HALF_SPINOR(b, texHalf2, stride); \ REAL_DOT_FLOAT4(rdot0, a0, b0); \ REAL_DOT_FLOAT4(rdot1, a1, b1); \ REAL_DOT_FLOAT4(rdot2, a2, b2); \ REAL_DOT_FLOAT4(rdot3, a3, b3); \ REAL_DOT_FLOAT4(rdot4, a4, b4); \ REAL_DOT_FLOAT4(rdot5, a5, b5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, a0, b0); \ IMAG_DOT_FLOAT4(idot1, a1, b1); \ IMAG_DOT_FLOAT4(idot2, a2, b2); \ IMAG_DOT_FLOAT4(idot3, a3, b3); \ IMAG_DOT_FLOAT4(idot4, a4, b4); \ IMAG_DOT_FLOAT4(idot5, a5, b5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0) #define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductHSt##suffix #define REDUCE_TYPES Float *aN, Float2 *bN, int stride #define REDUCE_PARAMS aN, bN, stride #define REDUCE_REAL_AUXILIARY(i) \ READ_HALF_SPINOR_ST(a, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(b, texHalfSt2, stride); \ REAL_DOT_FLOAT2(rdot0, a0, b0); \ REAL_DOT_FLOAT2(rdot1, a1, b1); \ REAL_DOT_FLOAT2(rdot2, a2, b2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, a0, b0); \ IMAG_DOT_FLOAT2(idot1, a1, b1); \ IMAG_DOT_FLOAT2(idot2, a2, b2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_REAL_OPERATION(i) (ac*bc*rdot0) 
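// Imaginary part: the same three staggered components, accumulated with
// IMAG_DOT_FLOAT2 and rescaled by the two site norms.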
#define REDUCE_IMAG_OPERATION(i) (ac*bc*idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex cDotProductCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductCuda(x.Even(), y.Even()) + cDotProductCuda(x.Odd(), y.Odd()); const int id = 18; quda::blas_flops += 4*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { char c = 0; bindTexture(&x, &y); dot = cDotProductDCuda((double2*)x.V(), (double2*)y.V(), c, length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { char c = 0; // FIXME: bindTexture() will incorrectly bind this as float4 since it queries the spin int spinor_bytes = x.Length()*sizeof(float); cudaBindTexture(0, xTexSingle2, x.V(), spinor_bytes); cudaBindTexture(0, yTexSingle2, y.V(), spinor_bytes); dot = cDotProductSCuda((float2*)x.V(), (float2*)y.V(), c, length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ dot = cDotProductHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ dot = cDotProductHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } return quda::Complex(dot.x, dot.y); } // // double2 xpaycDotzyCuda(float2 *x, float a, float2 *y, float2 *z, int n) {} // // First performs the operation y = x + a*y // Second returns complex dot product (z,y) // template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyD##suffix #define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z #define REDUCE_PARAMS x, a, y, z #define REDUCE_REAL_AUXILIARY(i) \ Float2 X = READ_DOUBLE2_TEXTURE(x, i); \ Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \ Float2 Z = READ_DOUBLE2_TEXTURE(z, i); #define REDUCE_IMAG_AUXILIARY(i) y[i].x = X.x + a*Y.x; y[i].y = X.y + a*Y.y #define REDUCE_REAL_OPERATION(i) (Z.x*y[i].x + Z.y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (Z.x*y[i].y - Z.y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyS##suffix #define REDUCE_TYPES Float2 *x, Float a, Float2 *y, Float2 *z #define REDUCE_PARAMS x, a, y, z #define REDUCE_REAL_AUXILIARY(i) y[i].x = x[i].x + a*y[i].x #define REDUCE_IMAG_AUXILIARY(i) y[i].y = x[i].y + a*y[i].y #define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix #define REDUCE_TYPES Float a, short4 *yH, Float2 *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) 
\ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ XPAY_FLOAT4(x0, a, y0); \ XPAY_FLOAT4(x1, a, y1); \ XPAY_FLOAT4(x2, a, y2); \ XPAY_FLOAT4(x3, a, y3); \ XPAY_FLOAT4(x4, a, y4); \ XPAY_FLOAT4(x5, a, y5); \ REAL_DOT_FLOAT4(rdot0, z0, y0); \ REAL_DOT_FLOAT4(rdot1, z1, y1); \ REAL_DOT_FLOAT4(rdot2, z2, y2); \ REAL_DOT_FLOAT4(rdot3, z3, y3); \ REAL_DOT_FLOAT4(rdot4, z4, y4); \ REAL_DOT_FLOAT4(rdot5, z5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, z0, y0); \ IMAG_DOT_FLOAT4(idot1, z1, y1); \ IMAG_DOT_FLOAT4(idot2, z2, y2); \ IMAG_DOT_FLOAT4(idot3, z3, y3); \ IMAG_DOT_FLOAT4(idot4, z4, y4); \ IMAG_DOT_FLOAT4(idot5, z5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) xpaycDotzyH##suffix #define REDUCE_TYPES Float a, short2 *yH, Float2 *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ XPAY_FLOAT2(x0, a, y0); \ XPAY_FLOAT2(x1, a, y1); \ XPAY_FLOAT2(x2, a, y2); \ REAL_DOT_FLOAT2(rdot0, z0, y0); \ REAL_DOT_FLOAT2(rdot1, z1, y1); \ REAL_DOT_FLOAT2(rdot2, z2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_IMAG_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, z0, y0); \ IMAG_DOT_FLOAT2(idot1, z1, y1); \ IMAG_DOT_FLOAT2(idot2, z2, y2); \ idot0 += idot1; idot0 += idot2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex xpaycDotzyCuda(cudaColorSpinorField &x, const double &a, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return xpaycDotzyCuda(x.Even(), a, y.Even(), z.Even()) + xpaycDotzyCuda(x.Odd(), a, y.Odd(), z.Odd()); const int id = 19; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; quda::blas_bytes += 4*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y, &z); dot = xpaycDotzyDCuda((double2*)x.V(), a, (double2*)y.V(), (double2*)z.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { dot = xpaycDotzySCuda((float2*)x.V(), (float)a, (float2*)y.V(), (float2*)z.V(), length, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); if (x.Nspin() ==4 ){//wilson dot = xpaycDotzyHCuda((float)a, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() ==1 ){//wilson dot = 
xpaycDotzyHCuda((float)a, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } return quda::Complex(dot.x, dot.y); } // // double3 cDotProductNormACuda(float2 *a, float2 *b, int n) {} // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAD##suffix #define REDUCE_TYPES Float2 *x, Float2 *y #define REDUCE_PARAMS x, y #define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i); #define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i); #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x) #define REDUCE_Z_OPERATION(i) (a.x*a.x + a.y*a.y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAS##suffix #define REDUCE_TYPES Float2 *a, Float2 *b #define REDUCE_PARAMS a, b #define REDUCE_X_AUXILIARY(i) #define REDUCE_Y_AUXILIARY(i) #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x) #define REDUCE_Z_OPERATION(i) (a[i].x*a[i].x + a[i].y*a[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAH##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR(x, texHalf1, stride); \ READ_HALF_SPINOR(y, texHalf2, stride); \ REAL_DOT_FLOAT4(norm0, x0, x0); \ REAL_DOT_FLOAT4(norm1, x1, x1); \ REAL_DOT_FLOAT4(norm2, x2, x2); \ REAL_DOT_FLOAT4(norm3, x3, x3); \ REAL_DOT_FLOAT4(norm4, x4, x4); \ REAL_DOT_FLOAT4(norm5, x5, x5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, x0, y0); \ REAL_DOT_FLOAT4(rdot1, x1, y1); \ REAL_DOT_FLOAT4(rdot2, x2, y2); \ REAL_DOT_FLOAT4(rdot3, x3, y3); \ REAL_DOT_FLOAT4(rdot4, x4, y4); \ REAL_DOT_FLOAT4(rdot5, x5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, x0, y0); \ IMAG_DOT_FLOAT4(idot1, x1, y1); \ IMAG_DOT_FLOAT4(idot2, x2, y2); \ IMAG_DOT_FLOAT4(idot3, x3, y3); \ IMAG_DOT_FLOAT4(idot4, x4, y4); \ IMAG_DOT_FLOAT4(idot5, x5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (xc*xc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormAHSt##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int 
stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \ REAL_DOT_FLOAT2(norm0, x0, x0); \ REAL_DOT_FLOAT2(norm1, x1, x1); \ REAL_DOT_FLOAT2(norm2, x2, x2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, x0, y0); \ REAL_DOT_FLOAT2(rdot1, x1, y1); \ REAL_DOT_FLOAT2(rdot2, x2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, x0, y0); \ IMAG_DOT_FLOAT2(idot1, x1, y1); \ IMAG_DOT_FLOAT2(idot2, x2, y2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (xc*xc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION double3 cDotProductNormACuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductNormACuda(x.Even(), y.Even()) + cDotProductNormACuda(x.Odd(), y.Odd()); const int id = 20; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y); return cDotProductNormADCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return cDotProductNormASCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return cDotProductNormAHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ //staggered return cDotProductNormAHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } // // double3 cDotProductNormBCuda(float2 *a, float2 *b, int n) {} // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBD##suffix #define REDUCE_TYPES Float2 *x, Float2 *y #define REDUCE_PARAMS x, y #define REDUCE_X_AUXILIARY(i) Float2 a = READ_DOUBLE2_TEXTURE(x, i); #define REDUCE_Y_AUXILIARY(i) Float2 b = READ_DOUBLE2_TEXTURE(y, i); #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a.x*b.x + a.y*b.y) #define REDUCE_Y_OPERATION(i) (a.x*b.y - a.y*b.x) #define REDUCE_Z_OPERATION(i) (b.x*b.x + b.y*b.y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBS##suffix #define REDUCE_TYPES Float2 *a, Float2 *b #define REDUCE_PARAMS a, b #define REDUCE_X_AUXILIARY(i) #define REDUCE_Y_AUXILIARY(i) #define REDUCE_Z_AUXILIARY(i) #define REDUCE_X_OPERATION(i) (a[i].x*b[i].x + a[i].y*b[i].y) #define REDUCE_Y_OPERATION(i) (a[i].x*b[i].y - a[i].y*b[i].x) #define REDUCE_Z_OPERATION(i) (b[i].x*b[i].x + b[i].y*b[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef 
REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBH##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR(x, texHalf1, stride); \ READ_HALF_SPINOR(y, texHalf2, stride); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, x0, y0); \ REAL_DOT_FLOAT4(rdot1, x1, y1); \ REAL_DOT_FLOAT4(rdot2, x2, y2); \ REAL_DOT_FLOAT4(rdot3, x3, y3); \ REAL_DOT_FLOAT4(rdot4, x4, y4); \ REAL_DOT_FLOAT4(rdot5, x5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT4(idot0, x0, y0); \ IMAG_DOT_FLOAT4(idot1, x1, y1); \ IMAG_DOT_FLOAT4(idot2, x2, y2); \ IMAG_DOT_FLOAT4(idot3, x3, y3); \ IMAG_DOT_FLOAT4(idot4, x4, y4); \ IMAG_DOT_FLOAT4(idot5, x5, y5); \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (yc*yc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) cDotProductNormBHSt##suffix #define REDUCE_TYPES Float2 *xN, Float2 *yN, int stride #define REDUCE_PARAMS xN, yN, stride #define REDUCE_X_AUXILIARY(i) \ READ_HALF_SPINOR_ST(x, texHalfSt1, stride); \ READ_HALF_SPINOR_ST(y, texHalfSt2, stride); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; #define REDUCE_Y_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, x0, y0); \ REAL_DOT_FLOAT2(rdot1, x1, y1); \ REAL_DOT_FLOAT2(rdot2, x2, y2); \ rdot0 += rdot1; rdot0 += rdot2; #define REDUCE_Z_AUXILIARY(i) \ IMAG_DOT_FLOAT2(idot0, x0, y0); \ IMAG_DOT_FLOAT2(idot1, x1, y1); \ IMAG_DOT_FLOAT2(idot2, x2, y2); \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (xc*yc*rdot0) #define REDUCE_Y_OPERATION(i) (xc*yc*idot0) #define REDUCE_Z_OPERATION(i) (yc*yc*norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION double3 cDotProductNormBCuda(cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cDotProductNormBCuda(x.Even(), y.Even()) + cDotProductNormBCuda(x.Odd(), y.Odd()); const int id = 21; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); int length = x.Length()/2; quda::blas_bytes += 2*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { bindTexture(&x, &y); return cDotProductNormBDCuda((double2*)x.V(), (double2*)y.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { return 
cDotProductNormBSCuda((float2*)x.V(), (float2*)y.V(), length, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 2*x.Volume()*sizeof(float); if (x.Nspin() == 4){ //wilson return cDotProductNormBHCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ //staggered return cDotProductNormBHStCuda((float*)x.Norm(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } // // double3 caxpbypzYmbwcDotProductWYNormYCuda(float2 a, float2 *x, float2 b, float2 *y, // float2 *z, float2 *w, float2 *u, int len) // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYD##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u #define REDUCE_PARAMS a, x, b, y, z, w, u #define REDUCE_X_AUXILIARY(i) \ Float2 X = READ_DOUBLE2_TEXTURE(x, i); \ Float2 Y = READ_DOUBLE2_TEXTURE(y, i); \ Float2 W = READ_DOUBLE2_TEXTURE(w, i); #define REDUCE_Y_AUXILIARY(i) \ Float2 Z = read_Float2(z, i); \ Z.x += a.x*X.x - a.y*X.y; \ Z.y += a.y*X.x + a.x*X.y; \ Z.x += b.x*Y.x - b.y*Y.y; \ Z.y += b.y*Y.x + b.x*Y.y; \ Y.x -= b.x*W.x - b.y*W.y; \ Y.y -= b.y*W.x + b.x*W.y; #define REDUCE_Z_AUXILIARY(i) \ z[i] = make_Float2(Z); \ y[i] = make_Float2(Y); #define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y) #define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x) #define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYS##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, Float2 *w, Float2 *u #define REDUCE_PARAMS a, x, b, y, z, w, u #define REDUCE_X_AUXILIARY(i) \ Float2 X = read_Float2(x, i); \ Float2 Y = read_Float2(y, i); \ Float2 W = read_Float2(w, i); #define REDUCE_Y_AUXILIARY(i) \ Float2 Z = read_Float2(z, i); \ Z.x += a.x*X.x - a.y*X.y; \ Z.y += a.y*X.x + a.x*X.y; \ Z.x += b.x*Y.x - b.y*Y.y; \ Z.y += b.y*Y.x + b.x*Y.y; \ Y.x -= b.x*W.x - b.y*W.y; \ Y.y -= b.y*W.x + b.x*W.y; #define REDUCE_Z_AUXILIARY(i) \ z[i] = make_Float2(Z); \ y[i] = make_Float2(Y); #define REDUCE_X_OPERATION(i) (u[i].x*y[i].x + u[i].y*y[i].y) #define REDUCE_Y_OPERATION(i) (u[i].x*y[i].y - u[i].y*y[i].x) #define REDUCE_Z_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION // // double3 caxpbypzYmbwcDotProductWYNormYCuda(float2 a, float2 *x, float2 b, float2 *y, // float2 *z, float2 *w, float2 *u, int len) // template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix #define REDUCE_TYPES Float2 a, Float2 b, short4 *yH, float *yN, short4 *zH, float *zN, float *wN, float *uN, int stride #define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride #define REDUCE_X_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ 
RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); \ CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); \ CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); \ CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); \ CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); \ CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); \ READ_HALF_SPINOR(w, texHalf4, stride); \ float2 bwc = -wc*b; \ CAXPY_FLOAT4(bwc, w0, y0); \ CAXPY_FLOAT4(bwc, w1, y1); \ CAXPY_FLOAT4(bwc, w2, y2); \ CAXPY_FLOAT4(bwc, w3, y3); \ CAXPY_FLOAT4(bwc, w4, y4); \ CAXPY_FLOAT4(bwc, w5, y5); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_Y_AUXILIARY(i) \ READ_HALF_SPINOR(u, texHalf5, stride); \ REAL_DOT_FLOAT4(rdot0, u0, y0); \ REAL_DOT_FLOAT4(rdot1, u1, y1); \ REAL_DOT_FLOAT4(rdot2, u2, y2); \ REAL_DOT_FLOAT4(rdot3, u3, y3); \ REAL_DOT_FLOAT4(rdot4, u4, y4); \ REAL_DOT_FLOAT4(rdot5, u5, y5); \ IMAG_DOT_FLOAT4(idot0, u0, y0); \ IMAG_DOT_FLOAT4(idot1, u1, y1); \ IMAG_DOT_FLOAT4(idot2, u2, y2); \ IMAG_DOT_FLOAT4(idot3, u3, y3); \ IMAG_DOT_FLOAT4(idot4, u4, y4); \ IMAG_DOT_FLOAT4(idot5, u5, y5); #define REDUCE_Z_AUXILIARY(i) \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2, norm0 += norm4; \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_X_OPERATION(i) (uc*rdot0) #define REDUCE_Y_OPERATION(i) (uc*idot0) #define REDUCE_Z_OPERATION(i) (norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION template <unsigned int reduce_threads, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpbypzYmbwcDotProductUYNormYH##suffix #define REDUCE_TYPES Float2 a, Float2 b, short2 *yH, float *yN, short2 *zH, float *zN, float *wN, float *uN, int stride #define REDUCE_PARAMS a, b, yH, yN, zH, zN, wN, uN, stride #define REDUCE_X_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); \ CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); \ CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); \ READ_HALF_SPINOR_ST(w, texHalfSt4, stride); \ float2 bwc = -wc*b; \ CAXPY_FLOAT2(bwc, w0, y0); \ CAXPY_FLOAT2(bwc, w1, y1); \ CAXPY_FLOAT2(bwc, w2, y2); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_Y_AUXILIARY(i) \ READ_HALF_SPINOR_ST(u, texHalfSt5, stride); \ REAL_DOT_FLOAT2(rdot0, u0, y0); \ REAL_DOT_FLOAT2(rdot1, u1, y1); \ REAL_DOT_FLOAT2(rdot2, u2, y2); \ IMAG_DOT_FLOAT2(idot0, u0, y0); \ IMAG_DOT_FLOAT2(idot1, u1, y1); \ IMAG_DOT_FLOAT2(idot2, u2, y2); #define REDUCE_Z_AUXILIARY(i) \ norm0 += norm1; norm0 += norm2; \ rdot0 += rdot1; rdot0 += rdot2; \ idot0 += idot1; idot0 += idot2; #define REDUCE_X_OPERATION(i) (uc*rdot0) #define REDUCE_Y_OPERATION(i) (uc*idot0) #define REDUCE_Z_OPERATION(i) 
(norm0) #include "reduce_triple_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_X_AUXILIARY #undef REDUCE_Y_AUXILIARY #undef REDUCE_Z_AUXILIARY #undef REDUCE_X_OPERATION #undef REDUCE_Y_OPERATION #undef REDUCE_Z_OPERATION // This convoluted kernel does the following: z += a*x + b*y, y -= b*w, norm = (y,y), dot = (u, y) double3 caxpbypzYmbwcDotProductUYNormYCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z, cudaColorSpinorField &w, cudaColorSpinorField &u) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpbypzYmbwcDotProductUYNormYCuda(a, x.Even(), b, y.Even(), z.Even(), w.Even(), u.Even()) + caxpbypzYmbwcDotProductUYNormYCuda(a, x.Odd(), b, y.Odd(), z.Odd(), w.Odd(), u.Odd()); const int id = 22; quda::blas_flops += 18*x.RealLength(); checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); checkSpinor(x,u); int length = x.Length()/2; quda::blas_bytes += 7*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z, &w, &u); return caxpbypzYmbwcDotProductUYNormYDCuda(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), (double2*)w.V(), (double2*)u.V(), length, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); return caxpbypzYmbwcDotProductUYNormYSCuda(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), (float2*)w.V(), (float2*)u.V(), length, id, x.Precision()); } else { // fused nSpin=4 kernel is slow on Fermi // N.B. this introduces an extra half truncation so will affect convergence (for the better?) 
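    // Outside of tuning, split the fused operation for nSpin=4 on sm_20 and newer:
    // perform the caxpbypzYmbw update first, then the combined (u,y) dot product and ||y||^2.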
if (!blasTuning && (__COMPUTE_CAPABILITY__ >= 200) && x.Nspin() == 4) { caxpbypzYmbwCuda(a, x, b, y, z, w); return cDotProductNormBCuda(u, y); } bindTexture(&x, &y, &z, &w, &u); quda::blas_bytes += 7*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4) { // wilson return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(), y.Stride(), y.Volume(), id, x.Precision()); } else if (x.Nspin() == 1){ // staggered return caxpbypzYmbwcDotProductUYNormYHCuda(a2, b2, (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (float*)w.Norm(), (float*)u.Norm(), y.Stride(), y.Volume(), id, x.Precision()); } else { errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } exit(-1); } template <typename Float, typename Float2> __global__ void cabxpyAxKernel(Float a, Float2 b, Float2 *x, Float2 *y, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { x[i].x *= a; x[i].y *= a; y[i].x += b.x*x[i].x - b.y*x[i].y; y[i].y += b.y*x[i].x + b.x*x[i].y; i += gridSize; } } __global__ void cabxpyAxHKernel(float a, float2 b, short4 *xH, float *xN, short4 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); AX_FLOAT4(a, x0); AX_FLOAT4(a, x1); AX_FLOAT4(a, x2); AX_FLOAT4(a, x3); AX_FLOAT4(a, x4); AX_FLOAT4(a, x5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); CAXPY_FLOAT4(b, x0, y0); CAXPY_FLOAT4(b, x1, y1); CAXPY_FLOAT4(b, x2, y2); CAXPY_FLOAT4(b, x3, y3); CAXPY_FLOAT4(b, x4, y4); CAXPY_FLOAT4(b, x5, y5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); i += gridSize; } } __global__ void cabxpyAxHKernel(float a, float2 b, short2 *xH, float *xN, short2 *yH, float *yN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); AX_FLOAT2(a, x0); AX_FLOAT2(a, x1); AX_FLOAT2(a, x2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); CAXPY_FLOAT2(b, x0, y0); CAXPY_FLOAT2(b, x1, y1); CAXPY_FLOAT2(b, x2, y2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); i += gridSize; } } // performs the operation y[i] += a*b*x[i], x[i] *= a void cabxpyAxCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) { checkSpinor(x,y); int length = x.Length()/2; setBlock(23, length, x.Precision()); quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 5*x.RealLength(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 b2 = make_double2(real(b), imag(b)); cabxpyAxKernel<<<blasGrid, blasBlock>>>((double)a, b2, (double2*)x.V(), (double2*)y.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 b2 = make_float2(real(b), imag(b)); cabxpyAxKernel<<<blasGrid, blasBlock>>>((float)a, b2, (float2*)x.V(), (float2*)y.V(), length); } else { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpyCuda(a, x.Even(), y.Even()); caxpyCuda(a, x.Odd(), y.Odd()); return; } bindTexture(&x, &y); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ 
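      // Wilson half precision: the kernel rescales x in registers (AX_FLOAT4), repacks it,
      // then applies the complex axpy into y before repacking y (see cabxpyAxHKernel above).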
//wilson cabxpyAxHKernel<<<blasGrid, blasBlock>>>((float)a, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); } else if (x.Nspin() == 1){ //staggered cabxpyAxHKernel<<<blasGrid, blasBlock>>>((float)a, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), y.Stride(), y.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } quda::blas_bytes += 4*x.Volume()*sizeof(float); } if (!blasTuning) checkCudaError(); } // // double caxpyNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y #define REDUCE_PARAMS a, x, y #define REDUCE_AUXILIARY(i) \ y[i].x += a.x*x[i].x - a.y*x[i].y; \ y[i].y += a.y*x[i].x + a.x*x[i].y #define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix #define REDUCE_TYPES Float a, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ CAXPY_FLOAT4(a, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ CAXPY_FLOAT4(a, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ CAXPY_FLOAT4(a, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ CAXPY_FLOAT4(a, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ CAXPY_FLOAT4(a, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ CAXPY_FLOAT4(a, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyNormH##suffix #define REDUCE_TYPES Float a, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ CAXPY_FLOAT2(a, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ CAXPY_FLOAT2(a, x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ CAXPY_FLOAT2(a, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double caxpyNormCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyNormCuda(a, x.Even(), y.Even()) + caxpyNormCuda(a, x.Odd(), y.Odd()); const int id = 24; quda::blas_flops += 6*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 3*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); return caxpyNormFCuda(a2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == 
QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); return caxpyNormFCuda(a2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson return caxpyNormHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return caxpyNormHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double caxpyXmayNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second performs the operator x[i] -= a*z[i] // Third returns the norm of x // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXF##suffix #define REDUCE_TYPES Float a, Float *x, Float *y, Float *z #define REDUCE_PARAMS a, x, y, z #define REDUCE_AUXILIARY(i) \ y[i].x += a.x*x[i].x - a.y*x[i].y; \ y[i].y += a.y*x[i].x + a.x*x[i].y; \ x[i].x += a.y*z[i].y - a.x*z[i].x; \ x[i].y -= (a.x*z[i].y + a.y*z[i].x); #define REDUCE_OPERATION(i) (x[i].x*x[i].x + x[i].y*x[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix #define REDUCE_TYPES Float a, short4 *xH, float *xN, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPY_FLOAT4(a, x0, y0); \ CMAXPY_FLOAT4(a, z0, x0); \ REAL_DOT_FLOAT4(norm0, x0, x0); \ CAXPY_FLOAT4(a, x1, y1); \ CMAXPY_FLOAT4(a, z1, x1); \ REAL_DOT_FLOAT4(norm1, x1, x1); \ CAXPY_FLOAT4(a, x2, y2); \ CMAXPY_FLOAT4(a, z2, x2); \ REAL_DOT_FLOAT4(norm2, x2, x2); \ CAXPY_FLOAT4(a, x3, y3); \ CMAXPY_FLOAT4(a, z3, x3); \ REAL_DOT_FLOAT4(norm3, x3, x3); \ CAXPY_FLOAT4(a, x4, y4); \ CMAXPY_FLOAT4(a, z4, x4); \ REAL_DOT_FLOAT4(norm4, x4, x4); \ CAXPY_FLOAT4(a, x5, y5); \ CMAXPY_FLOAT4(a, z5, x5); \ REAL_DOT_FLOAT4(norm5, x5, x5); \ norm0 += norm1; norm2 += norm3; \ norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) caxpyXmazNormXH##suffix #define REDUCE_TYPES Float a, short2 *xH, float *xN, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPY_FLOAT2(a, x0, y0); \ CMAXPY_FLOAT2(a, z0, x0); \ REAL_DOT_FLOAT2(norm0, x0, x0); \ CAXPY_FLOAT2(a, x1, y1); \ CMAXPY_FLOAT2(a, z1, x1); \ REAL_DOT_FLOAT2(norm1, x1, x1); \ CAXPY_FLOAT2(a, x2, y2); \ CMAXPY_FLOAT2(a, z2, x2); \ REAL_DOT_FLOAT2(norm2, x2, x2); \ norm0 += norm1; norm0 += norm2; \ 
CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double caxpyXmazNormXCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyXmazNormXCuda(a, x.Even(), y.Even(), z.Even()) + caxpyXmazNormXCuda(a, x.Odd(), y.Odd(), z.Odd()); const int id = 25; quda::blas_flops += 10*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 5*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); return caxpyXmazNormXFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); return caxpyXmazNormXFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson return caxpyXmazNormXHCuda(a2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return caxpyXmazNormXHCuda(a2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } // // double cabxpyAxNormCuda(float a, float *x, float *y, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the norm of y // template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormF##suffix #define REDUCE_TYPES Float a, Float b, Float *x, Float *y #define REDUCE_PARAMS a, b, x, y #define REDUCE_AUXILIARY(i) \ x[i].x *= a.x; \ x[i].y *= a.x; \ y[i].x += b.x*x[i].x - b.y*x[i].y; \ y[i].y += b.y*x[i].x + b.x*x[i].y; #define REDUCE_OPERATION(i) (y[i].x*y[i].x + y[i].y*y[i].y) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix #define REDUCE_TYPES Float a, Float b, short4 *xH, float *xN, short4 *yH, float *yN, int stride #define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ AX_FLOAT4(a.x, x0); \ AX_FLOAT4(a.x, x1); \ AX_FLOAT4(a.x, x2); \ AX_FLOAT4(a.x, x3); \ AX_FLOAT4(a.x, x4); \ AX_FLOAT4(a.x, x5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(xH, xN, x, stride); \ CAXPY_FLOAT4(b, x0, y0); \ REAL_DOT_FLOAT4(norm0, y0, y0); \ CAXPY_FLOAT4(b, x1, y1); \ REAL_DOT_FLOAT4(norm1, y1, y1); \ CAXPY_FLOAT4(b, x2, y2); \ REAL_DOT_FLOAT4(norm2, y2, y2); \ CAXPY_FLOAT4(b, x3, y3); \ REAL_DOT_FLOAT4(norm3, y3, y3); \ CAXPY_FLOAT4(b, x4, y4); \ REAL_DOT_FLOAT4(norm4, y4, y4); \ CAXPY_FLOAT4(b, x5, y5); \ REAL_DOT_FLOAT4(norm5, y5, y5); \ norm0 += norm1; norm2 += norm3; norm4 += norm5; norm0 += norm2; norm0 += norm4; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef 
REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION template <unsigned int reduce_threads, typename Float> #define REDUCE_FUNC_NAME(suffix) cabxpyAxNormH##suffix #define REDUCE_TYPES Float a, Float b, short2 *xH, float *xN, short2 *yH, float *yN, int stride #define REDUCE_PARAMS a, b, xH, xN, yH, yN, stride #define REDUCE_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ AX_FLOAT2(a.x, x0); \ AX_FLOAT2(a.x, x1); \ AX_FLOAT2(a.x, x2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(xH, xN, x, stride); \ CAXPY_FLOAT2(b, x0, y0); \ REAL_DOT_FLOAT2(norm0, y0, y0); \ CAXPY_FLOAT2(b, x1, y1); \ REAL_DOT_FLOAT2(norm1, y1, y1); \ CAXPY_FLOAT2(b, x2, y2); \ REAL_DOT_FLOAT2(norm2, y2, y2); \ norm0 += norm1; norm0 += norm2; \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_OPERATION(i) (norm0) #include "reduce_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_AUXILIARY #undef REDUCE_OPERATION double cabxpyAxNormCuda(const double &a, const quda::Complex &b, cudaColorSpinorField &x, cudaColorSpinorField &y) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return cabxpyAxNormCuda(a, b, x.Even(), y.Even()) + cabxpyAxNormCuda(a, b, x.Odd(), y.Odd()); const int id = 26; quda::blas_flops += 7*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 4*x.RealLength()*x.Precision(); if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(a, 0); double2 b2 = make_double2(real(b), imag(b)); return cabxpyAxNormFCuda(a2, b2, (double2*)x.V(), (double2*)y.V(), x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(a, 0); float2 b2 = make_float2(real(b), imag(b)); return cabxpyAxNormFCuda(a2, b2, (float2*)x.V(), (float2*)y.V(), x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(a, 0); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ //wilson return cabxpyAxNormHCuda(a2, b2, (short4*)x.V(), (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered return cabxpyAxNormHCuda(a2, b2, (short2*)x.V(), (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); return 0; } } } template <typename Float2> __global__ void caxpbypzDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = READ_DOUBLE2_TEXTURE(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = READ_DOUBLE2_TEXTURE(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); i += gridSize; } } template <typename Float2> __global__ void caxpbypzSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 *z, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 X = read_Float2(x, i); Float2 Z = read_Float2(z, i); Z.x += a.x*X.x - a.y*X.y; Z.y += a.y*X.x + a.x*X.y; Float2 Y = read_Float2(y, i); Z.x += b.x*Y.x - b.y*Y.y; Z.y += b.y*Y.x + b.x*Y.y; z[i] = make_Float2(Z); i += gridSize; } } __global__ void caxpbypzHKernel(float2 a, 
float2 b, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPBYPZ_FLOAT4(a, x0, b, y0, z0); CAXPBYPZ_FLOAT4(a, x1, b, y1, z1); CAXPBYPZ_FLOAT4(a, x2, b, y2, z2); CAXPBYPZ_FLOAT4(a, x3, b, y3, z3); CAXPBYPZ_FLOAT4(a, x4, b, y4, z4); CAXPBYPZ_FLOAT4(a, x5, b, y5, z5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(zH, zN, z, stride); i += gridSize; } } __global__ void caxpbypzHKernel(float2 a, float2 b, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPBYPZ_FLOAT2(a, x0, b, y0, z0); CAXPBYPZ_FLOAT2(a, x1, b, y1, z1); CAXPBYPZ_FLOAT2(a, x2, b, y2, z2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(zH, zN, z, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + z[i] void caxpbypzCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, cudaColorSpinorField &z) { checkSpinor(x,y); checkSpinor(x,z); int length = x.Length()/2; setBlock(27, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypzCuda(a, x.Even(), b, y.Even(), z.Even()); caxpbypzCuda(a, x.Odd(), b, y.Odd(), z.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); bindTexture(&x, &y, &z); caxpbypzDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), (double2*)z.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); caxpbypzSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), (float2*)z.V(), length); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 4*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); if (x.Nspin() == 4){ //wilson caxpbypzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered caxpbypzHKernel<<<blasGrid, blasBlock>>>(a2, b2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 4*x.RealLength()*x.Precision(); quda::blas_flops += 8*x.RealLength(); if (!blasTuning) checkCudaError(); } template <typename Float2> __global__ void caxpbypczpwDKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 c, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 W = read_Float2(w, i); Float2 X = READ_DOUBLE2_TEXTURE(x, i); CAXPY_DOUBLE2(a, X, W); Float2 Y = READ_DOUBLE2_TEXTURE(y, i); CAXPY_DOUBLE2(b, Y, W); Float2 Z = READ_DOUBLE2_TEXTURE(z, i); CAXPY_DOUBLE2(c, Z, W); w[i] = make_Float2(W); i += gridSize; } } template 
<typename Float2> __global__ void caxpbypczpwSKernel(Float2 a, Float2 *x, Float2 b, Float2 *y, Float2 c, Float2 *z, Float2 *w, int len) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < len) { Float2 W = read_Float2(w, i); Float2 X = read_Float2(x, i); CAXPY_FLOAT2(a, X, W); Float2 Y = read_Float2(y, i); CAXPY_FLOAT2(b, Y, W); Float2 Z = read_Float2(z, i); CAXPY_FLOAT2(c, Z, W); w[i] = make_Float2(W); i += gridSize; } } __global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short4 *yH, float *yN, short4 *zH, float *zN, short4* wH, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR(w, texHalf4, texNorm4, stride); RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); CAXPY_FLOAT4(a, x0, w0); CAXPY_FLOAT4(a, x1, w1); CAXPY_FLOAT4(a, x2, w2); CAXPY_FLOAT4(a, x3, w3); CAXPY_FLOAT4(a, x4, w4); CAXPY_FLOAT4(a, x5, w5); RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); CAXPY_FLOAT4(b, y0, w0); CAXPY_FLOAT4(b, y1, w1); CAXPY_FLOAT4(b, y2, w2); CAXPY_FLOAT4(b, y3, w3); CAXPY_FLOAT4(b, y4, w4); CAXPY_FLOAT4(b, y5, w5); RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); CAXPY_FLOAT4(c, z0, w0); CAXPY_FLOAT4(c, z1, w1); CAXPY_FLOAT4(c, z2, w2); CAXPY_FLOAT4(c, z3, w3); CAXPY_FLOAT4(c, z4, w4); CAXPY_FLOAT4(c, z5, w5); CONSTRUCT_HALF_SPINOR_FROM_SINGLE(wH, wN, w, stride); i += gridSize; } } __global__ void caxpbypczpwHKernel(float2 a, float2 b, float2 c, float *xN, short2 *yH, float *yN, short2 *zH, float *zN, short2 *wH, float *wN, int stride, int length) { unsigned int i = blockIdx.x*(blockDim.x) + threadIdx.x; unsigned int gridSize = gridDim.x*blockDim.x; while (i < length) { RECONSTRUCT_HALF_SPINOR_ST(w, texHalfSt4, texNorm4, stride); RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); CAXPY_FLOAT2(a, x0, w0); CAXPY_FLOAT2(a, x1, w1); CAXPY_FLOAT2(a, x2, w2); RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); CAXPY_FLOAT2(b, y0, w0); CAXPY_FLOAT2(b, y1, w1); CAXPY_FLOAT2(b, y2, w2); RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); CAXPY_FLOAT2(c, z0, w0); CAXPY_FLOAT2(c, z1, w1); CAXPY_FLOAT2(c, z2, w2); CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(wH, wN, w, stride); i += gridSize; } } // performs the operation z[i] = a*x[i] + b*y[i] + c*z[i] + w[i] void caxpbypczpwCuda(const quda::Complex &a, cudaColorSpinorField &x, const quda::Complex &b, cudaColorSpinorField &y, const quda::Complex &c, cudaColorSpinorField &z, cudaColorSpinorField &w) { checkSpinor(x,y); checkSpinor(x,z); checkSpinor(x,w); int length = x.Length()/2; setBlock(28, length, x.Precision()); if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) { caxpbypczpwCuda(a, x.Even(), b, y.Even(), c, z.Even(), w.Even()); caxpbypczpwCuda(a, x.Odd(), b, y.Odd(), c, z.Odd(), w.Odd()); return; } if (x.Precision() == QUDA_DOUBLE_PRECISION) { double2 a2 = make_double2(real(a), imag(a)); double2 b2 = make_double2(real(b), imag(b)); double2 c2 = make_double2(real(c), imag(c)); bindTexture(&x, &y, &z, &w); caxpbypczpwDKernel<<<blasGrid, blasBlock>>>(a2, (double2*)x.V(), b2, (double2*)y.V(), c2, (double2*)z.V(), (double2*)w.V(), length); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); float2 c2 = make_float2(real(c), imag(c)); caxpbypczpwSKernel<<<blasGrid, blasBlock>>>(a2, (float2*)x.V(), b2, (float2*)y.V(), c2, 
(float2*)z.V(), (float2*)w.V(), length); } else { bindTexture(&x, &y, &z, &w); quda::blas_bytes += 6*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); float2 b2 = make_float2(real(b), imag(b)); float2 c2 = make_float2(real(c), imag(c)); if (x.Nspin() == 4){ //wilson caxpbypczpwHKernel<<<blasGrid, blasBlock>>>(a2, b2, c2, (float*)x.Norm(), (short4*)y.V(), (float*)y.Norm(), (short4*)z.V(), (float*)z.Norm(), (short4*)w.V(), (float*)w.Norm(), z.Stride(), z.Volume()); } else if (x.Nspin() == 1){ //staggered caxpbypczpwHKernel<<<blasGrid, blasBlock>>>(a2, b2, c2, (float*)x.Norm(), (short2*)y.V(), (float*)y.Norm(), (short2*)z.V(), (float*)z.Norm(), (short2*)w.V(), (float*)w.Norm(), z.Stride(), z.Volume()); }else{ errorQuda("ERROR: nSpin=%d is not supported\n", x.Nspin()); } } quda::blas_bytes += 5*x.RealLength()*x.Precision(); quda::blas_flops += 12*x.RealLength(); if (!blasTuning) checkCudaError(); } // // double caxpyDotzyCuda(float a, float *x, float *y, float *z, n){} // // First performs the operation y[i] = a*x[i] + y[i] // Second returns the dot product (z,y) // template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyF##suffix #define REDUCE_TYPES Float2 a, Float2 *x, Float2 *y, Float2 *z, Float c #define REDUCE_PARAMS a, x, y, z, c #define REDUCE_REAL_AUXILIARY(i) y[i].x += a.x*x[i].x - a.y*x[i].y; #define REDUCE_IMAG_AUXILIARY(i) y[i].y += a.y*x[i].x + a.x*x[i].y; #define REDUCE_REAL_OPERATION(i) (z[i].x*y[i].x + z[i].y*y[i].y) #define REDUCE_IMAG_OPERATION(i) (z[i].x*y[i].y - z[i].y*y[i].x) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix #define REDUCE_TYPES Float2 a, short4 *yH, Float *yN, int stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR(x, texHalf1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR(y, texHalf2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR(z, texHalf3, texNorm3, stride); \ CAXPY_FLOAT4(a, x0, y0); \ CAXPY_FLOAT4(a, x1, y1); \ CAXPY_FLOAT4(a, x2, y2); \ CAXPY_FLOAT4(a, x3, y3); \ CAXPY_FLOAT4(a, x4, y4); \ CAXPY_FLOAT4(a, x5, y5); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE(yH, yN, y, stride); #define REDUCE_IMAG_AUXILIARY(i) \ REAL_DOT_FLOAT4(rdot0, z0, y0); \ REAL_DOT_FLOAT4(rdot1, z1, y1); \ REAL_DOT_FLOAT4(rdot2, z2, y2); \ REAL_DOT_FLOAT4(rdot3, z3, y3); \ REAL_DOT_FLOAT4(rdot4, z4, y4); \ REAL_DOT_FLOAT4(rdot5, z5, y5); \ IMAG_DOT_FLOAT4(idot0, z0, y0); \ IMAG_DOT_FLOAT4(idot1, z1, y1); \ IMAG_DOT_FLOAT4(idot2, z2, y2); \ IMAG_DOT_FLOAT4(idot3, z3, y3); \ IMAG_DOT_FLOAT4(idot4, z4, y4); \ IMAG_DOT_FLOAT4(idot5, z5, y5); \ rdot0 += rdot1; rdot2 += rdot3; rdot4 += rdot5; rdot0 += rdot2; rdot0 += rdot4; \ idot0 += idot1; idot2 += idot3; idot4 += idot5; idot0 += idot2; idot0 += idot4; #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION template <unsigned int reduce_threads, typename Float, typename Float2> #define REDUCE_FUNC_NAME(suffix) caxpyDotzyH##suffix #define REDUCE_TYPES Float2 a, short2 *yH, Float *yN, int 
stride #define REDUCE_PARAMS a, yH, yN, stride #define REDUCE_REAL_AUXILIARY(i) \ RECONSTRUCT_HALF_SPINOR_ST(x, texHalfSt1, texNorm1, stride); \ RECONSTRUCT_HALF_SPINOR_ST(y, texHalfSt2, texNorm2, stride); \ RECONSTRUCT_HALF_SPINOR_ST(z, texHalfSt3, texNorm3, stride); \ CAXPY_FLOAT2(a, x0, y0); \ CAXPY_FLOAT2(a, x1, y1); \ CAXPY_FLOAT2(a, x2, y2); \ CONSTRUCT_HALF_SPINOR_FROM_SINGLE_ST(yH, yN, y, stride); #define REDUCE_IMAG_AUXILIARY(i) \ REAL_DOT_FLOAT2(rdot0, z0, y0); \ REAL_DOT_FLOAT2(rdot1, z1, y1); \ REAL_DOT_FLOAT2(rdot2, z2, y2); \ IMAG_DOT_FLOAT2(idot0, z0, y0); \ IMAG_DOT_FLOAT2(idot1, z1, y1); \ IMAG_DOT_FLOAT2(idot2, z2, y2); \ rdot0 += rdot1; rdot0 += rdot2; \ idot0 += idot1; idot0 += idot2; #define REDUCE_REAL_OPERATION(i) (rdot0) #define REDUCE_IMAG_OPERATION(i) (idot0) #include "reduce_complex_core.h" #undef REDUCE_FUNC_NAME #undef REDUCE_TYPES #undef REDUCE_PARAMS #undef REDUCE_REAL_AUXILIARY #undef REDUCE_IMAG_AUXILIARY #undef REDUCE_REAL_OPERATION #undef REDUCE_IMAG_OPERATION quda::Complex caxpyDotzyCuda(const quda::Complex &a, cudaColorSpinorField &x, cudaColorSpinorField &y, cudaColorSpinorField &z) { if (x.SiteSubset() == QUDA_FULL_SITE_SUBSET) return caxpyDotzyCuda(a, x.Even(), y.Even(), z.Even()) + caxpyDotzyCuda(a, x.Odd(), y.Odd(), z.Odd()); const int id = 29; quda::blas_flops += 8*x.RealLength(); checkSpinor(x,y); quda::blas_bytes += 4*x.RealLength()*x.Precision(); double2 dot; if (x.Precision() == QUDA_DOUBLE_PRECISION) { char c = 0; double2 a2 = make_double2(real(a), imag(a)); dot = caxpyDotzyFCuda(a2, (double2*)x.V(), (double2*)y.V(), (double2*)z.V(), c, x.Length()/2, id, x.Precision()); } else if (x.Precision() == QUDA_SINGLE_PRECISION) { char c = 0; float2 a2 = make_float2(real(a), imag(a)); dot = caxpyDotzyFCuda(a2, (float2*)x.V(), (float2*)y.V(), (float2*)z.V(), c, x.Length()/2, id, x.Precision()); } else { bindTexture(&x, &y, &z); quda::blas_bytes += 3*x.Volume()*sizeof(float); float2 a2 = make_float2(real(a), imag(a)); if (x.Nspin() == 4){ //wilson dot = caxpyDotzyHCuda(a2, (short4*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else if (x.Nspin() == 1){ //staggered dot = caxpyDotzyHCuda(a2, (short2*)y.V(), (float*)y.Norm(), x.Stride(), x.Volume(), id, x.Precision()); }else{ errorQuda("%s: nSpin(%d) is not supported\n", __FUNCTION__, x.Nspin()); } } return quda::Complex(dot.x, dot.y); }
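The REDUCE_* macro blocks above are expanded by reduce_core.h and reduce_complex_core.h into one reduction kernel per precision, which hides the arithmetic being carried out. The standalone CUDA sketch below is not QUDA code and every name in it is hypothetical; it only illustrates the double-precision semantics of caxpyNormCuda: update y[i] += a*x[i], then return the global sum of |y[i]|^2 using a per-block shared-memory tree reduction followed by a host-side sum of the block partials.

#include <cuComplex.h>
#include <cuda_runtime.h>
#include <vector>

__global__ void caxpyNormSketch(cuDoubleComplex a, const cuDoubleComplex *x,
                                cuDoubleComplex *y, double *partial, int n) {
  extern __shared__ double sdata[];
  double sum = 0.0;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += gridDim.x * blockDim.x) {
    y[i] = cuCadd(y[i], cuCmul(a, x[i]));                                  // y += a*x
    sum += cuCreal(y[i]) * cuCreal(y[i]) + cuCimag(y[i]) * cuCimag(y[i]);  // accumulate |y|^2
  }
  sdata[threadIdx.x] = sum;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) {  // tree reduction in shared memory
    if (threadIdx.x < s) sdata[threadIdx.x] += sdata[threadIdx.x + s];
    __syncthreads();
  }
  if (threadIdx.x == 0) partial[blockIdx.x] = sdata[0];
}

double caxpyNormSketchHost(cuDoubleComplex a, const cuDoubleComplex *d_x,
                           cuDoubleComplex *d_y, int n) {
  const int block = 256, grid = 64;  // block must be a power of two for the tree reduction
  double *d_partial;
  cudaMalloc(&d_partial, grid * sizeof(double));
  caxpyNormSketch<<<grid, block, block * sizeof(double)>>>(a, d_x, d_y, d_partial, n);
  std::vector<double> h(grid);
  cudaMemcpy(h.data(), d_partial, grid * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(d_partial);
  double norm = 0.0;
  for (double v : h) norm += v;
  return norm;
}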
7ee298416fa0e39127ee2cfc275c6622122cb6c7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <hip/hip_complex.h> #include <iostream> #include <stdio.h> __global__ void Map(const hipDoubleComplex *A, hipDoubleComplex *B, int numElements) { int index = blockDim.x * blockIdx.x + threadIdx.x; if(index < numElements){ B[index] = cuCmul(cuConj(A[index]),B[index]); } } __global__ void reduce(hipDoubleComplex *B_idata, hipDoubleComplex *B_odata) { extern __shared__ hipDoubleComplex SM[]; int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; SM[tid] = B_idata[index]; __syncthreads(); for(int s = 1; s < blockDim.x; s *= 2) { if(tid % (2*s) == 0) { SM[tid] = cuCadd(SM[tid],SM[tid + s]); } __syncthreads(); } if (tid == 0) { B_odata[blockIdx.x] = SM[0]; } } __global__ void myZdotc(hipDoubleComplex *A, hipDoubleComplex *B, hipDoubleComplex *O, int numElements) { int blocksize = 128; int gridsize = ((numElements - 1) / blocksize) + 1; int SMsize = blocksize * sizeof(hipDoubleComplex); hipLaunchKernelGGL(( Map) , dim3(gridsize), dim3(blocksize) , 0, 0, A,B,numElements); hipDeviceSynchronize(); hipDoubleComplex *temp; while (gridsize > 0) { hipLaunchKernelGGL(( reduce) , dim3(gridsize), dim3(blocksize), SMsize , 0, B,A); hipDeviceSynchronize(); temp = A; A = B; B = temp; gridsize >>= 7; } O[0] = B[0]; } int main() { int numElements = pow(2,21); size_t size = numElements * sizeof(hipDoubleComplex); size_t size1 = sizeof(hipDoubleComplex); hipDoubleComplex *h_A, *h_B, *h_O; hipDoubleComplex *d_A, *d_B, *d_O; hipHostMalloc((hipDoubleComplex**)&h_A, size); hipHostMalloc((hipDoubleComplex**)&h_B, size); hipHostMalloc((hipDoubleComplex**)&h_O, size1); hipMalloc((void**)&d_A, size); hipMalloc((void**)&d_B, size); hipMalloc((void**)&d_O, size1); for(int i = 0; i < numElements; i++) { h_A[i] = make_cuDoubleComplex(2.25,2.25); h_B[i] = make_cuDoubleComplex(2.25,2.25); } hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); hipMemcpy(d_O, h_O, size1, hipMemcpyHostToDevice); hipLaunchKernelGGL(( myZdotc) , dim3(1), dim3(1) , 0, 0, d_A,d_B,d_O,numElements); hipMemcpy(h_O, d_O, size1, hipMemcpyDeviceToHost); std::cout << cuCreal(h_O[0]) << ":" << cuCimag(h_O[0]) <<'\n'; hipFree(d_A); hipFree(d_B); hipFree(d_O); hipHostFree(h_A); hipHostFree(h_B); hipHostFree(h_O); hipDeviceReset(); return 0; }
7ee298416fa0e39127ee2cfc275c6622122cb6c7.cu
#include <math.h> #include <cuComplex.h> #include <iostream> #include <stdio.h> __global__ void Map(const cuDoubleComplex *A, cuDoubleComplex *B, int numElements) { int index = blockDim.x * blockIdx.x + threadIdx.x; if(index < numElements){ B[index] = cuCmul(cuConj(A[index]),B[index]); } } __global__ void reduce(cuDoubleComplex *B_idata, cuDoubleComplex *B_odata) { extern __shared__ cuDoubleComplex SM[]; int tid = threadIdx.x; int index = blockIdx.x * blockDim.x + threadIdx.x; SM[tid] = B_idata[index]; __syncthreads(); for(int s = 1; s < blockDim.x; s *= 2) { if(tid % (2*s) == 0) { SM[tid] = cuCadd(SM[tid],SM[tid + s]); } __syncthreads(); } if (tid == 0) { B_odata[blockIdx.x] = SM[0]; } } __global__ void myZdotc(cuDoubleComplex *A, cuDoubleComplex *B, cuDoubleComplex *O, int numElements) { int blocksize = 128; int gridsize = ((numElements - 1) / blocksize) + 1; int SMsize = blocksize * sizeof(cuDoubleComplex); Map <<< gridsize, blocksize >>> (A,B,numElements); cudaDeviceSynchronize(); cuDoubleComplex *temp; while (gridsize > 0) { reduce <<< gridsize, blocksize, SMsize >>>(B,A); cudaDeviceSynchronize(); temp = A; A = B; B = temp; gridsize >>= 7; } O[0] = B[0]; } int main() { int numElements = pow(2,21); size_t size = numElements * sizeof(cuDoubleComplex); size_t size1 = sizeof(cuDoubleComplex); cuDoubleComplex *h_A, *h_B, *h_O; cuDoubleComplex *d_A, *d_B, *d_O; cudaMallocHost((cuDoubleComplex**)&h_A, size); cudaMallocHost((cuDoubleComplex**)&h_B, size); cudaMallocHost((cuDoubleComplex**)&h_O, size1); cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_O, size1); for(int i = 0; i < numElements; i++) { h_A[i] = make_cuDoubleComplex(2.25,2.25); h_B[i] = make_cuDoubleComplex(2.25,2.25); } cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); cudaMemcpy(d_O, h_O, size1, cudaMemcpyHostToDevice); myZdotc <<< 1, 1 >>> (d_A,d_B,d_O,numElements); cudaMemcpy(h_O, d_O, size1, cudaMemcpyDeviceToHost); std::cout << cuCreal(h_O[0]) << ":" << cuCimag(h_O[0]) <<'\n'; cudaFree(d_A); cudaFree(d_B); cudaFree(d_O); cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_O); cudaDeviceReset(); return 0; }
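In the pair above, myZdotc is launched with a single thread and then launches Map and reduce from device code. That relies on CUDA dynamic parallelism, so it needs relocatable device code (-rdc=true) and a device of compute capability 3.5 or newer, and device-side cudaDeviceSynchronize() has been deprecated in recent CUDA releases. A host-driven variant of the same map-then-fold reduction is sketched below; hostZdotc is a hypothetical name, it reuses the Map and reduce kernels defined above, and it assumes (as the original does with 2^21 elements and 128-thread blocks) that the element count remains a multiple of the block size on every pass.

#include <cuComplex.h>
#include <cuda_runtime.h>
#include <utility>

cuDoubleComplex hostZdotc(cuDoubleComplex *d_A, cuDoubleComplex *d_B, int numElements) {
  const int blocksize = 128;
  const size_t smem = blocksize * sizeof(cuDoubleComplex);
  int n = numElements;
  Map<<<(n + blocksize - 1) / blocksize, blocksize>>>(d_A, d_B, n);  // B[i] = conj(A[i]) * B[i]
  while (n > 1) {                          // fold the array by one block width per pass
    int gridsize = n / blocksize;          // exact, since n stays a multiple of blocksize
    reduce<<<gridsize, blocksize, smem>>>(d_B, d_A);  // per-block sums of d_B written to d_A
    std::swap(d_A, d_B);                   // the next pass reduces the partial sums
    n = gridsize;
  }
  cuDoubleComplex result;
  cudaMemcpy(&result, d_B, sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
  return result;
}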
13dd337943515a1b5f4c58e6278222079505d479.hip
// !!! This is a file automatically generated by hipify!!! // generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, hipStream_t stream);
13dd337943515a1b5f4c58e6278222079505d479.cu
// generated by gen_cuda_conv_bias_kern_impls.py #include "../conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4.cuinl" template void megdnn::cuda::conv_bias_int8::do_conv_bias_int8_implicit_gemm_imma16x16x16_cdiv4hwn4<PerChannelBiasVisitor, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>>>( const int8_t* d_src, const int8_t* d_filter, PerChannelBiasVisitor bias, IConvEpilogue<Activation<megdnn::param_enumv::ConvBias::NonlineMode::IDENTITY>> epilogue, const ConvParam& param, float alpha, float beta, cudaStream_t stream);
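This entry is one of many script-generated files whose only content is an explicit template instantiation: each conv-bias variant (bias visitor combined with an activation epilogue) is emitted into its own translation unit, so the heavy template definition in the shared .cuinl header compiles once per variant and per-file build time stays bounded. The sketch below is a hypothetical, unrelated illustration of that pattern, not MegDNN code.

// shared_impl.cuinl: template definition included by every generated .cu file
template <typename Epilogue>
__global__ void epilogue_kernel(const float *src, float *dst, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) dst[i] = Epilogue::apply(src[i]);
}

template <typename Epilogue>
void run_epilogue(const float *src, float *dst, int n, cudaStream_t stream) {
  epilogue_kernel<Epilogue><<<(n + 255) / 256, 256, 0, stream>>>(src, dst, n);
}

// generated_relu.cu: one generated file per variant, holding only the include of the
// .cuinl plus an explicit instantiation, analogous to the entry above
struct Relu { static __device__ float apply(float x) { return x > 0.f ? x : 0.f; } };
template void run_epilogue<Relu>(const float *, float *, int, cudaStream_t);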
e16d23e8561226b3e86388807f357712f0641cfd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global) { int index = blockIdx.x * blockDim.x + threadIdx.x ; int sum = 0; int j=0; for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock)) { sum = sum + d_array[i]; j++; } extern __shared__ int d_blockMemmory[]; d_blockMemmory[threadIdx.x] = sum; sum =0; __syncthreads(); if(threadIdx.x == 0) { for(int i =0; i<numberOfThreadsPerBlock;i++) { sum = sum+ d_blockMemmory[i]; } d_global[blockIdx.x] = sum; } }
e16d23e8561226b3e86388807f357712f0641cfd.cu
#include "includes.h" __global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global) { int index = blockIdx.x * blockDim.x + threadIdx.x ; int sum = 0; int j=0; for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock)) { sum = sum + d_array[i]; j++; } extern __shared__ int d_blockMemmory[]; d_blockMemmory[threadIdx.x] = sum; sum =0; __syncthreads(); if(threadIdx.x == 0) { for(int i =0; i<numberOfThreadsPerBlock;i++) { sum = sum+ d_blockMemmory[i]; } d_global[blockIdx.x] = sum; } }
e123483219d0be9815f66e53437ed471ab081b54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef HAMC_SCRATCH_H #define HAMC_SCRATCH_H #include "../../src/hamc/hamc_cpu_code.c" void printMatrix(uint8_t *mat, int n); __global__ void fixRow(HAMC_DATA_TYPE_t *matrix, int size, int rowId) { // The ith row of the matrix __shared__ HAMC_DATA_TYPE_t Ri[512]; // The diagonal element for ith row __shared__ HAMC_DATA_TYPE_t Aii; int colId = threadIdx.x; Ri[colId] = matrix[size * rowId + colId]; Aii = matrix[size * rowId + colId]; __syncthreads(); // Divide the whole row by the diagonal element making sure it is not 0 Ri[colId] ^= Aii; matrix[size * rowId + colId] = Ri[colId]; } __global__ void fixColumn(HAMC_DATA_TYPE_t *matrix, int size, int colId) { int i = threadIdx.x; int j = blockIdx.x; // The colId column __shared__ HAMC_DATA_TYPE_t col[512]; // The jth element of the colId row __shared__ HAMC_DATA_TYPE_t AColIdj; // The jth column __shared__ HAMC_DATA_TYPE_t colj[512]; col[i] = matrix[i * size + colId]; if (col[i] != 0) { colj[i] = matrix[i * size + j]; AColIdj = matrix[colId * size + j]; if (i != colId) { //colj[i] = colj[i] - AColIdj * col[i]; colj[i] ^= AColIdj & col[i]; } matrix[i * size + j] = colj[i]; } } int getIndex(int cols, int row, int col) { return row*cols + col; } int main() { const int n = 4; // creating input uint8_t *h_A = new uint8_t[n*n]; bin_matrix CPU = mat_init_cpu(n,n); uint8_t val; int seed = 11; // create random nxn binary matrix srand(seed); for ( int i = 0; i < n*2; i++) { val = rand() %2; h_A[i] = val; CPU->data[i] = val; } printf("h_A matrix:\n"); printMatrix(h_A,n); printf("\n"); /* copy host matrix to GPU */ HAMC_DATA_TYPE_t *d_matrix; hipMalloc((void **) &d_matrix, n * n * sizeof(HAMC_DATA_TYPE_t)); hipMemcpy(d_matrix, h_A, n*n*sizeof(HAMC_DATA_TYPE_t), hipMemcpyHostToDevice); int j = 0; while (j < n) { // Find k where matrix[k][j] is not 0 for (int k = 0; k < CPU->rows; k++) { if (h_A[getIndex(n, k, j)] == 1) { //fix row hipLaunchKernelGGL(( fixRow), dim3(1),dim3(n), 0, 0, d_matrix, n, k); //fix column hipLaunchKernelGGL(( fixColumn), dim3(1),dim3(n), 0, 0, d_matrix, n, j); } } j++; } bin_matrix h_B = mat_init_cpu(n,n); hipMemcpy(h_B->data, d_matrix, n*n*sizeof(HAMC_DATA_TYPE_t), hipMemcpyDeviceToHost); printf("GPU output matrix:\n"); printMatrix(h_B->data,n); printf("\n"); bin_matrix CPU_out = circ_matrix_inverse_cpu(CPU); printf("CPU output matrix:\n"); printMatrix(CPU_out->data,n); printf("\n"); free(h_A); free(h_B); free(CPU); free(CPU_out); return 0; } void printMatrix(uint8_t *mat, int n) { for ( int i = 0; i < n; i++) { for ( int j = 0; j < n; j++) { printf("%d ", mat[i*n+j]); } printf("\n"); } } #endif /* HAMC_SCRATCH_H */
e123483219d0be9815f66e53437ed471ab081b54.cu
#ifndef HAMC_SCRATCH_H #define HAMC_SCRATCH_H #include "../../src/hamc/hamc_cpu_code.c" void printMatrix(uint8_t *mat, int n); __global__ void fixRow(HAMC_DATA_TYPE_t *matrix, int size, int rowId) { // The ith row of the matrix __shared__ HAMC_DATA_TYPE_t Ri[512]; // The diagonal element for ith row __shared__ HAMC_DATA_TYPE_t Aii; int colId = threadIdx.x; Ri[colId] = matrix[size * rowId + colId]; Aii = matrix[size * rowId + colId]; __syncthreads(); // Divide the whole row by the diagonal element making sure it is not 0 Ri[colId] ^= Aii; matrix[size * rowId + colId] = Ri[colId]; } __global__ void fixColumn(HAMC_DATA_TYPE_t *matrix, int size, int colId) { int i = threadIdx.x; int j = blockIdx.x; // The colId column __shared__ HAMC_DATA_TYPE_t col[512]; // The jth element of the colId row __shared__ HAMC_DATA_TYPE_t AColIdj; // The jth column __shared__ HAMC_DATA_TYPE_t colj[512]; col[i] = matrix[i * size + colId]; if (col[i] != 0) { colj[i] = matrix[i * size + j]; AColIdj = matrix[colId * size + j]; if (i != colId) { //colj[i] = colj[i] - AColIdj * col[i]; colj[i] ^= AColIdj & col[i]; } matrix[i * size + j] = colj[i]; } } int getIndex(int cols, int row, int col) { return row*cols + col; } int main() { const int n = 4; // creating input uint8_t *h_A = new uint8_t[n*n]; bin_matrix CPU = mat_init_cpu(n,n); uint8_t val; int seed = 11; // create random nxn binary matrix srand(seed); for ( int i = 0; i < n*2; i++) { val = rand() %2; h_A[i] = val; CPU->data[i] = val; } printf("h_A matrix:\n"); printMatrix(h_A,n); printf("\n"); /* copy host matrix to GPU */ HAMC_DATA_TYPE_t *d_matrix; cudaMalloc((void **) &d_matrix, n * n * sizeof(HAMC_DATA_TYPE_t)); cudaMemcpy(d_matrix, h_A, n*n*sizeof(HAMC_DATA_TYPE_t), cudaMemcpyHostToDevice); int j = 0; while (j < n) { // Find k where matrix[k][j] is not 0 for (int k = 0; k < CPU->rows; k++) { if (h_A[getIndex(n, k, j)] == 1) { //fix row fixRow<<<1,n>>>(d_matrix, n, k); //fix column fixColumn<<<1,n>>>(d_matrix, n, j); } } j++; } bin_matrix h_B = mat_init_cpu(n,n); cudaMemcpy(h_B->data, d_matrix, n*n*sizeof(HAMC_DATA_TYPE_t), cudaMemcpyDeviceToHost); printf("GPU output matrix:\n"); printMatrix(h_B->data,n); printf("\n"); bin_matrix CPU_out = circ_matrix_inverse_cpu(CPU); printf("CPU output matrix:\n"); printMatrix(CPU_out->data,n); printf("\n"); free(h_A); free(h_B); free(CPU); free(CPU_out); return 0; } void printMatrix(uint8_t *mat, int n) { for ( int i = 0; i < n; i++) { for ( int j = 0; j < n; j++) { printf("%d ", mat[i*n+j]); } printf("\n"); } } #endif /* HAMC_SCRATCH_H */
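The fixRow and fixColumn kernels above operate over GF(2), where addition and subtraction are both XOR, so clearing a pivot column amounts to XOR-ing the pivot row into every other row that has a 1 in that column. The compact CPU reference below (gf2_eliminate is a hypothetical name, independent of the hamc_cpu_code.c routines) spells out that elimination; augmenting the input as [A | I] and running the same loop is the standard way this procedure yields an inverse.

#include <cstdint>
#include <utility>
#include <vector>

// In-place Gauss-Jordan elimination of an n x n binary matrix stored row-major.
// Returns false if the matrix is singular over GF(2).
bool gf2_eliminate(std::vector<uint8_t> &m, int n) {
  for (int col = 0; col < n; ++col) {
    int pivot = -1;
    for (int row = col; row < n; ++row)   // find a row with a 1 in this column
      if (m[row * n + col]) { pivot = row; break; }
    if (pivot < 0) return false;          // no pivot available: singular matrix
    if (pivot != col)
      for (int j = 0; j < n; ++j) std::swap(m[pivot * n + j], m[col * n + j]);
    for (int row = 0; row < n; ++row)     // clear this column in every other row
      if (row != col && m[row * n + col])
        for (int j = 0; j < n; ++j)
          m[row * n + j] ^= m[col * n + j];  // XOR is both add and subtract in GF(2)
  }
  return true;
}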
260290a127608f0bdb6d8855fc09c75f58c63d41.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include <string.h> #include "bmp.h" #include "omp.h" extern "C" void LoadBMPFile(uchar3 **img, BMPHeader *hdr, BMPInfoHeader *infoHdr, const char *name); extern "C" void WriteBMPFile(uchar3 **img, BMPHeader hdr, BMPInfoHeader infoHdr, const char *name); #define idx(A,B) ((A) * cols + (B)) typedef struct pixel { float x, y, z; } Pixel; const int tile_size = 4; __global__ void filter(Pixel *myimg, Pixel *oimg, int rows, int cols) { __shared__ Pixel temp[tile_size+2][tile_size+2]; int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int xx = threadIdx.x+1; int yy = threadIdx.y+1; int Dx = x+blockDim.x < cols ? blockDim.x : cols-x-1; int Dy = y+blockDim.y < rows ? blockDim.y : rows-y-1; // Copy pixels to shared memory if (x < cols && y < rows) { // Interior pixels temp[yy][xx] = myimg[idx(y,x)]; // Left & right side pixels if(threadIdx.x == 0) { temp[yy][0] = myimg[idx(y,x-1)]; temp[yy][Dx+1] = myimg[idx(y,x+Dx)]; } // Top & bottom pixels if(threadIdx.y == 0) { temp[0][xx] = myimg[idx(y-1,x)]; temp[Dy+1][xx] = myimg[idx(y+Dy,x)]; } // Corner pixels if(threadIdx.x == 0 && threadIdx.y == 0) { temp[0][0] = myimg[idx(y-1,x-1)]; temp[0][Dx+1] = myimg[idx(y-1,x+Dx)]; temp[Dy+1][0] = myimg[idx(y+Dy,x-1)]; temp[Dy+1][Dx+1] = myimg[idx(y+Dy,x+Dx)]; } } __syncthreads(); // Compute stencil for the block if (x > 0 && x < cols-1 && y > 0 && y < rows-1) { Pixel result; result.x = 0; result.y = 0; result.z = 0; for(int dy = -1; dy <=1; dy++) { for(int dx = -1; dx <=1; dx++) { result.x += temp[yy+dy][xx+dx].x; result.y += temp[yy+dy][xx+dx].y; result.z += temp[yy+dy][xx+dx].z; } } oimg[idx(y,x)].x = result.x/9; oimg[idx(y,x)].y = result.y/9; oimg[idx(y,x)].z = result.z/9; } } double apply_stencil(const int rows, const int cols, Pixel * const in, Pixel * const out) { Pixel *d_in, *d_out; hipMalloc(&d_in, rows*cols*sizeof(Pixel)); hipMalloc(&d_out, rows*cols*sizeof(Pixel)); hipMemcpy(d_in, in, rows*cols*sizeof(Pixel), hipMemcpyHostToDevice); const dim3 blockSize(tile_size,tile_size,1); const dim3 gridSize((cols+tile_size-1)/tile_size,(rows+tile_size-1)/tile_size,1); double tstart, tend; tstart = omp_get_wtime(); hipLaunchKernelGGL(( filter), dim3(gridSize), dim3(blockSize), 0, 0, d_in, d_out, rows, cols); hipDeviceSynchronize(); tend = omp_get_wtime(); hipMemcpy(out, d_out, rows*cols*sizeof(Pixel), hipMemcpyDeviceToHost); return(tend-tstart); } // main read, call filter, write new image int main(int argc, char **argv) { BMPHeader hdr; BMPInfoHeader infoHdr; uchar3 *bimg; Pixel *img,*oimg; uint64_t x,y; uint64_t img_size; //double start, end; if(argc != 2) { printf("Usage: %s imageName\n", argv[0]); return 1; } LoadBMPFile(&bimg, &hdr, &infoHdr, argv[1]); printf("Data init done: size = %d, width = %d, height = %d.\n", hdr.size, infoHdr.width, infoHdr.height); img_size = infoHdr.width * infoHdr.height * sizeof(Pixel); img = (Pixel *) malloc(img_size); if (img == NULL) { printf("Error Cant alloc image space\n"); exit(-1); } memset(img,0,img_size); oimg = (Pixel *) malloc(img_size); if (oimg == NULL) { printf("Error Cant alloc output image space\n"); exit(-1); } memset(oimg,0,img_size); printf("Convert image\n"); // convert to floats for processing int rows = infoHdr.height; int cols = infoHdr.width; for (y=0; y<rows; y++) for (x=0; x<cols; x++) { img[idx(y,x)].x = bimg[idx(y,x)].x/255.0; 
img[idx(y,x)].y = bimg[idx(y,x)].y/255.0; img[idx(y,x)].z = bimg[idx(y,x)].z/255.0; } double runtime; runtime = apply_stencil(infoHdr.height, infoHdr.width, img, oimg); printf("time for stencil = %f seconds\n",runtime); // clear bitmap array memset(bimg,0,infoHdr.height*infoHdr.width*3); double err = 0.0; // convert to uchar3 for output printf("rows %d cols %d\n",rows, cols); for (y=0; y<rows; y++) for (x=0; x<cols; x++) { bimg[idx(y,x)].x = oimg[idx(y,x)].x*255; bimg[idx(y,x)].y = oimg[idx(y,x)].y*255; bimg[idx(y,x)].z = oimg[idx(y,x)].z*255; err += (img[idx(y,x)].x - oimg[idx(y,x)].x); err += (img[idx(y,x)].y - oimg[idx(y,x)].y); err += (img[idx(y,x)].z - oimg[idx(y,x)].z); } printf("Cummulative error between images %g\n",err); // write the output file WriteBMPFile(&bimg, hdr,infoHdr, "./img-new.bmp"); }
260290a127608f0bdb6d8855fc09c75f58c63d41.cu
#include <math.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <strings.h> #include <string.h> #include "bmp.h" #include "omp.h" extern "C" void LoadBMPFile(uchar3 **img, BMPHeader *hdr, BMPInfoHeader *infoHdr, const char *name); extern "C" void WriteBMPFile(uchar3 **img, BMPHeader hdr, BMPInfoHeader infoHdr, const char *name); #define idx(A,B) ((A) * cols + (B)) typedef struct pixel { float x, y, z; } Pixel; const int tile_size = 4; __global__ void filter(Pixel *myimg, Pixel *oimg, int rows, int cols) { __shared__ Pixel temp[tile_size+2][tile_size+2]; int x = threadIdx.x + blockDim.x*blockIdx.x; int y = threadIdx.y + blockDim.y*blockIdx.y; int xx = threadIdx.x+1; int yy = threadIdx.y+1; int Dx = x+blockDim.x < cols ? blockDim.x : cols-x-1; int Dy = y+blockDim.y < rows ? blockDim.y : rows-y-1; // Copy pixels to shared memory if (x < cols && y < rows) { // Interior pixels temp[yy][xx] = myimg[idx(y,x)]; // Left & right side pixels if(threadIdx.x == 0) { temp[yy][0] = myimg[idx(y,x-1)]; temp[yy][Dx+1] = myimg[idx(y,x+Dx)]; } // Top & bottom pixels if(threadIdx.y == 0) { temp[0][xx] = myimg[idx(y-1,x)]; temp[Dy+1][xx] = myimg[idx(y+Dy,x)]; } // Corner pixels if(threadIdx.x == 0 && threadIdx.y == 0) { temp[0][0] = myimg[idx(y-1,x-1)]; temp[0][Dx+1] = myimg[idx(y-1,x+Dx)]; temp[Dy+1][0] = myimg[idx(y+Dy,x-1)]; temp[Dy+1][Dx+1] = myimg[idx(y+Dy,x+Dx)]; } } __syncthreads(); // Compute stencil for the block if (x > 0 && x < cols-1 && y > 0 && y < rows-1) { Pixel result; result.x = 0; result.y = 0; result.z = 0; for(int dy = -1; dy <=1; dy++) { for(int dx = -1; dx <=1; dx++) { result.x += temp[yy+dy][xx+dx].x; result.y += temp[yy+dy][xx+dx].y; result.z += temp[yy+dy][xx+dx].z; } } oimg[idx(y,x)].x = result.x/9; oimg[idx(y,x)].y = result.y/9; oimg[idx(y,x)].z = result.z/9; } } double apply_stencil(const int rows, const int cols, Pixel * const in, Pixel * const out) { Pixel *d_in, *d_out; cudaMalloc(&d_in, rows*cols*sizeof(Pixel)); cudaMalloc(&d_out, rows*cols*sizeof(Pixel)); cudaMemcpy(d_in, in, rows*cols*sizeof(Pixel), cudaMemcpyHostToDevice); const dim3 blockSize(tile_size,tile_size,1); const dim3 gridSize((cols+tile_size-1)/tile_size,(rows+tile_size-1)/tile_size,1); double tstart, tend; tstart = omp_get_wtime(); filter<<<gridSize, blockSize>>>(d_in, d_out, rows, cols); cudaDeviceSynchronize(); tend = omp_get_wtime(); cudaMemcpy(out, d_out, rows*cols*sizeof(Pixel), cudaMemcpyDeviceToHost); return(tend-tstart); } // main read, call filter, write new image int main(int argc, char **argv) { BMPHeader hdr; BMPInfoHeader infoHdr; uchar3 *bimg; Pixel *img,*oimg; uint64_t x,y; uint64_t img_size; //double start, end; if(argc != 2) { printf("Usage: %s imageName\n", argv[0]); return 1; } LoadBMPFile(&bimg, &hdr, &infoHdr, argv[1]); printf("Data init done: size = %d, width = %d, height = %d.\n", hdr.size, infoHdr.width, infoHdr.height); img_size = infoHdr.width * infoHdr.height * sizeof(Pixel); img = (Pixel *) malloc(img_size); if (img == NULL) { printf("Error Cant alloc image space\n"); exit(-1); } memset(img,0,img_size); oimg = (Pixel *) malloc(img_size); if (oimg == NULL) { printf("Error Cant alloc output image space\n"); exit(-1); } memset(oimg,0,img_size); printf("Convert image\n"); // convert to floats for processing int rows = infoHdr.height; int cols = infoHdr.width; for (y=0; y<rows; y++) for (x=0; x<cols; x++) { img[idx(y,x)].x = bimg[idx(y,x)].x/255.0; img[idx(y,x)].y = bimg[idx(y,x)].y/255.0; img[idx(y,x)].z = bimg[idx(y,x)].z/255.0; } double runtime; runtime = 
apply_stencil(infoHdr.height, infoHdr.width, img, oimg); printf("time for stencil = %f seconds\n",runtime); // clear bitmap array memset(bimg,0,infoHdr.height*infoHdr.width*3); double err = 0.0; // convert to uchar3 for output printf("rows %d cols %d\n",rows, cols); for (y=0; y<rows; y++) for (x=0; x<cols; x++) { bimg[idx(y,x)].x = oimg[idx(y,x)].x*255; bimg[idx(y,x)].y = oimg[idx(y,x)].y*255; bimg[idx(y,x)].z = oimg[idx(y,x)].z*255; err += (img[idx(y,x)].x - oimg[idx(y,x)].x); err += (img[idx(y,x)].y - oimg[idx(y,x)].y); err += (img[idx(y,x)].z - oimg[idx(y,x)].z); } printf("Cummulative error between images %g\n",err); // write the output file WriteBMPFile(&bimg, hdr,infoHdr, "./img-new.bmp"); }
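The tiled kernel above stages a (tile_size+2) by (tile_size+2) patch, interior pixels plus a one-pixel halo, in shared memory before averaging. When adjusting tile_size or the halo loads, a plain global-memory version of the same 3x3 box filter is a handy correctness reference; the sketch below (filter_reference is a hypothetical name) reuses the Pixel struct and idx macro from the file above and, like the original, leaves the border pixels untouched.

__global__ void filter_reference(const Pixel *in, Pixel *out, int rows, int cols) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x <= 0 || x >= cols - 1 || y <= 0 || y >= rows - 1) return;  // skip the border
  Pixel r = {0.f, 0.f, 0.f};
  for (int dy = -1; dy <= 1; ++dy)
    for (int dx = -1; dx <= 1; ++dx) {
      r.x += in[idx(y + dy, x + dx)].x;
      r.y += in[idx(y + dy, x + dx)].y;
      r.z += in[idx(y + dy, x + dx)].z;
    }
  out[idx(y, x)].x = r.x / 9.f;
  out[idx(y, x)].y = r.y / 9.f;
  out[idx(y, x)].z = r.z / 9.f;
}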
6363b4d8a44db411340da736ce5aa82bf87fb938.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_blas_interface.h" #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" namespace oneflow { namespace { hipblasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) { hipblasOperation_t cublas_trans; if (trans == CBLAS_TRANSPOSE::CblasNoTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_N; } else if (trans == CBLAS_TRANSPOSE::CblasTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_T; } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) { cublas_trans = hipblasOperation_t::HIPBLAS_OP_C; } else { // do nothing } return cublas_trans; } std::tuple<int, int, int, hipblasOperation_t, hipblasOperation_t> PrepareToCallCublasGemm( enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k) { int lda = (trans_a == CblasNoTrans) ? k : m; int ldb = (trans_b == CblasNoTrans) ? n : k; int ldc = n; hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int lda, ldb, ldc; hipblasOperation_t cublas_trans_a, cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); hipblasHandle_t handle; if (std::is_same<T, half>::value) { handle = ctx->cublas_tensor_op_math_handle(); } else { handle = ctx->cublas_pmh_handle(); } const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, a, lda, &beta_val, c, ldc); } template<> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const half* a, const half* b, const double beta, half* c) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); int lda, ldb, ldc; hipblasOperation_t cublas_trans_a, cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); if (GetCudaSmVersion() >= 500) { OF_CUBLAS_CHECK(hipblasGemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, HIP_R_16F, ldb, a, HIP_R_16F, lda, &beta_f, c, HIP_R_16F, ldc, HIP_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); } else { OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, HIP_R_16F, ldb, a, 
HIP_R_16F, lda, &beta_f, c, HIP_R_16F, ldc)); } } std::tuple<int, int, int> CalcMNKForGemm(enum CBLAS_TRANSPOSE trans_a, const Blob* a, const Blob* c) { const auto& a_shape = a->shape_view(); const auto& c_shape = c->shape_view(); int m = c_shape.At(0); int n = c_shape.Count(1); int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0); return std::make_tuple(m, n, k); } std::tuple<int, int, int, int, int, int, hipblasOperation_t, hipblasOperation_t> PrepareToCallBatchedGemm(const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k) { const int a_stride = m * k; const int b_stride = k * n; const int c_stride = m * n; const int lda = (trans_a == CblasNoTrans) ? k : m; const int ldb = (trans_b == CblasNoTrans) ? n : k; const int ldc = n; hipblasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); hipblasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> hipDataType GetCudaDataType4BatchedGemm() { return CudaDataType<T>::value; } template<> hipDataType GetCudaDataType4BatchedGemm<half>() { return HIP_R_16F; } template<typename T> void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int a_stride, b_stride, c_stride; int lda, ldb, ldc; const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); hipblasOperation_t cublas_trans_a, cublas_trans_b; std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallBatchedGemm(trans_a, trans_b, batch_size, m, n, k); if (TORCH_HIP_VERSION >= 9010 && GetCudaSmVersion() >= 500) { #if TORCH_HIP_VERSION >= 9010 hipDataType data_type = GetCudaDataType4BatchedGemm<T>(); OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx( ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, reinterpret_cast<const void*>(&alpha_val), reinterpret_cast<const void*>(b), data_type, ldb, b_stride, reinterpret_cast<const void*>(a), data_type, lda, a_stride, reinterpret_cast<const void*>(&beta_val), reinterpret_cast<void*>(c), data_type, ldc, c_stride, batch_size, data_type, HIPBLAS_GEMM_DEFAULT)); #endif } else { cublas_gemmStridedBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, b_stride, a, lda, a_stride, &beta_val, c, ldc, c_stride, batch_size); } } #if TORCH_HIP_VERSION >= 9010 template<> void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k, const double alpha, const half* a, const half* b, const double beta, half* c) { int a_stride, b_stride, c_stride; int lda, ldb, ldc; hipblasOperation_t cublas_trans_a, cublas_trans_b; std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallBatchedGemm(trans_a, trans_b, batch_size, m, n, k); if (GetCudaSmVersion() >= 500) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); #if TORCH_HIP_VERSION >= 11000 hipblasGemmAlgo_t algo = HIPBLAS_GEMM_DEFAULT; #else hipblasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP; #endif OF_CUBLAS_CHECK(hipblasGemmStridedBatchedEx( ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, 
m, k, &alpha_f, reinterpret_cast<const void*>(b), HIP_R_16F, ldb, b_stride, reinterpret_cast<const void*>(a), HIP_R_16F, lda, a_stride, &beta_f, reinterpret_cast<void*>(c), HIP_R_16F, ldc, c_stride, batch_size, HIP_R_32F, algo)); } else { const half alpha_h = static_cast<half>(alpha); const half beta_h = static_cast<half>(beta); cublas_gemmStridedBatched<half>(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_h, b, ldb, b_stride, a, lda, a_stride, &beta_h, c, ldc, c_stride, batch_size); } } #endif __global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y, const int incy) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); } #else HALF_CHECK_FAILED; #endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) } __global__ void AxpyHalf2Gpu(const int n, const half alpha, const half* x, half* y) { const int h2_n = n / 2; const auto* x_h2 = reinterpret_cast<const half2*>(x); auto* y_h2 = reinterpret_cast<half2*>(y); half2 alpha_h2 = __half2half2(alpha); CUDA_1D_KERNEL_LOOP(i, h2_n) { y_h2[i] = __hfma2(alpha_h2, x_h2[i], y_h2[i]); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int last_idx = n - 1; y[last_idx] = __hfma(alpha, x[last_idx], y[last_idx]); } } } // namespace void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, reinterpret_cast<const 
half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x, const int incx, float* y, const int incy) { cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy); } void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha, const double* x, const int incx, double* y, const int incy) { cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy); } void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha, const float16* x, const int incx, float16* y, const int incy) { if (incx == 1 && incy == 1) { hipLaunchKernelGGL(( AxpyHalf2Gpu), dim3(BlocksNum4ThreadsNum(n / 2)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, float16_2half(alpha), reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } else { hipLaunchKernelGGL(( AxpyHalfGpu), dim3(BlocksNum4ThreadsNum(n)), dim3(kCudaThreadsNumPerBlock), 0, ctx->cuda_stream(), n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y), incy); } } } // namespace oneflow
6363b4d8a44db411340da736ce5aa82bf87fb938.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/kernel/util/cuda_blas_interface.h" #include "oneflow/core/device/cuda_util.h" #include "oneflow/core/register/blob.h" #include "oneflow/core/kernel/util/cuda_half_util.h" namespace oneflow { namespace { cublasOperation_t CblasTrans2CublasTrans(CBLAS_TRANSPOSE trans) { cublasOperation_t cublas_trans; if (trans == CBLAS_TRANSPOSE::CblasNoTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_N; } else if (trans == CBLAS_TRANSPOSE::CblasTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_T; } else if (trans == CBLAS_TRANSPOSE::CblasConjTrans) { cublas_trans = cublasOperation_t::CUBLAS_OP_C; } else { // do nothing } return cublas_trans; } std::tuple<int, int, int, cublasOperation_t, cublasOperation_t> PrepareToCallCublasGemm( enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k) { int lda = (trans_a == CblasNoTrans) ? k : m; int ldb = (trans_b == CblasNoTrans) ? n : k; int ldc = n; cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int lda, ldb, ldc; cublasOperation_t cublas_trans_a, cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); cublasHandle_t handle; if (std::is_same<T, half>::value) { handle = ctx->cublas_tensor_op_math_handle(); } else { handle = ctx->cublas_pmh_handle(); } const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); cublas_gemm<T>(handle, cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, a, lda, &beta_val, c, ldc); } template<> void Gemm(DeviceCtx* ctx, const enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const half* a, const half* b, const double beta, half* c) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); int lda, ldb, ldc; cublasOperation_t cublas_trans_a, cublas_trans_b; std::tie(lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallCublasGemm(trans_a, trans_b, m, n, k); if (GetCudaSmVersion() >= 500) { OF_CUBLAS_CHECK(cublasGemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_f, c, CUDA_R_16F, ldc, CUDA_R_32F, CUBLAS_GEMM_DFALT_TENSOR_OP)); } else { OF_CUBLAS_CHECK(cublasSgemmEx(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, b, CUDA_R_16F, ldb, a, CUDA_R_16F, lda, &beta_f, c, CUDA_R_16F, ldc)); } } std::tuple<int, int, int> CalcMNKForGemm(enum 
CBLAS_TRANSPOSE trans_a, const Blob* a, const Blob* c) { const auto& a_shape = a->shape_view(); const auto& c_shape = c->shape_view(); int m = c_shape.At(0); int n = c_shape.Count(1); int k = (trans_a == CblasNoTrans) ? a_shape.Count(1) : a_shape.At(0); return std::make_tuple(m, n, k); } std::tuple<int, int, int, int, int, int, cublasOperation_t, cublasOperation_t> PrepareToCallBatchedGemm(const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k) { const int a_stride = m * k; const int b_stride = k * n; const int c_stride = m * n; const int lda = (trans_a == CblasNoTrans) ? k : m; const int ldb = (trans_b == CblasNoTrans) ? n : k; const int ldc = n; cublasOperation_t cublas_trans_a = CblasTrans2CublasTrans(trans_a); cublasOperation_t cublas_trans_b = CblasTrans2CublasTrans(trans_b); return std::make_tuple(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b); } template<typename T> cudaDataType_t GetCudaDataType4BatchedGemm() { return CudaDataType<T>::value; } template<> cudaDataType_t GetCudaDataType4BatchedGemm<half>() { return CUDA_R_16F; } template<typename T> void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k, const double alpha, const T* a, const T* b, const double beta, T* c) { int a_stride, b_stride, c_stride; int lda, ldb, ldc; const T alpha_val = static_cast<T>(alpha); const T beta_val = static_cast<T>(beta); cublasOperation_t cublas_trans_a, cublas_trans_b; std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallBatchedGemm(trans_a, trans_b, batch_size, m, n, k); if (CUDA_VERSION >= 9010 && GetCudaSmVersion() >= 500) { #if CUDA_VERSION >= 9010 cudaDataType_t data_type = GetCudaDataType4BatchedGemm<T>(); OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx( ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, reinterpret_cast<const void*>(&alpha_val), reinterpret_cast<const void*>(b), data_type, ldb, b_stride, reinterpret_cast<const void*>(a), data_type, lda, a_stride, reinterpret_cast<const void*>(&beta_val), reinterpret_cast<void*>(c), data_type, ldc, c_stride, batch_size, data_type, CUBLAS_GEMM_DEFAULT)); #endif } else { cublas_gemmStridedBatched<T>(ctx->cublas_pmh_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_val, b, ldb, b_stride, a, lda, a_stride, &beta_val, c, ldc, c_stride, batch_size); } } #if CUDA_VERSION >= 9010 template<> void BatchedGemmImpl(DeviceCtx* ctx, const enum CBLAS_ORDER order, const enum CBLAS_TRANSPOSE trans_a, const enum CBLAS_TRANSPOSE trans_b, int batch_size, int m, int n, int k, const double alpha, const half* a, const half* b, const double beta, half* c) { int a_stride, b_stride, c_stride; int lda, ldb, ldc; cublasOperation_t cublas_trans_a, cublas_trans_b; std::tie(a_stride, b_stride, c_stride, lda, ldb, ldc, cublas_trans_a, cublas_trans_b) = PrepareToCallBatchedGemm(trans_a, trans_b, batch_size, m, n, k); if (GetCudaSmVersion() >= 500) { const float alpha_f = static_cast<float>(alpha); const float beta_f = static_cast<float>(beta); #if CUDA_VERSION >= 11000 cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT; #else cublasGemmAlgo_t algo = CUBLAS_GEMM_DFALT_TENSOR_OP; #endif OF_CUBLAS_CHECK(cublasGemmStridedBatchedEx( ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_f, reinterpret_cast<const void*>(b), CUDA_R_16F, ldb, b_stride, reinterpret_cast<const void*>(a), 
CUDA_R_16F, lda, a_stride, &beta_f, reinterpret_cast<void*>(c), CUDA_R_16F, ldc, c_stride, batch_size, CUDA_R_32F, algo)); } else { const half alpha_h = static_cast<half>(alpha); const half beta_h = static_cast<half>(beta); cublas_gemmStridedBatched<half>(ctx->cublas_tensor_op_math_handle(), cublas_trans_b, cublas_trans_a, n, m, k, &alpha_h, b, ldb, b_stride, a, lda, a_stride, &beta_h, c, ldc, c_stride, batch_size); } } #endif __global__ void AxpyHalfGpu(const int n, const half alpha, const half* x, const int incx, half* y, const int incy) { #if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) CUDA_1D_KERNEL_LOOP(i, n) { y[i * incy] = __hfma(alpha, x[i * incx], y[i * incy]); } #else HALF_CHECK_FAILED; #endif // __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__) } __global__ void AxpyHalf2Gpu(const int n, const half alpha, const half* x, half* y) { const int h2_n = n / 2; const auto* x_h2 = reinterpret_cast<const half2*>(x); auto* y_h2 = reinterpret_cast<half2*>(y); half2 alpha_h2 = __half2half2(alpha); CUDA_1D_KERNEL_LOOP(i, h2_n) { y_h2[i] = __hfma2(alpha_h2, x_h2[i], y_h2[i]); } if (n % 2 != 0 && blockIdx.x == 0 && threadIdx.x == 0) { const int last_idx = n - 1; y[last_idx] = __hfma(alpha, x[last_idx], y[last_idx]); } } } // namespace void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { Gemm<float>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { Gemm<double>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { Gemm<half>(ctx, CblasRowMajor, trans_a, trans_b, m, n, k, alpha, reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const float* a, const float* b, const double beta, float* c) { BatchedGemmImpl<float>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const double* a, const double* b, const double beta, double* c) { BatchedGemmImpl<double>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, a, b, beta, c); } void BlasIf<DeviceType::kGPU>::OFBatchedGemm(DeviceCtx* ctx, enum CBLAS_TRANSPOSE trans_a, enum CBLAS_TRANSPOSE trans_b, const int batch_size, const int m, const int n, const int k, const double alpha, const float16* a, const float16* b, const double beta, float16* c) { BatchedGemmImpl<half>(ctx, CblasRowMajor, trans_a, trans_b, batch_size, m, n, k, alpha, reinterpret_cast<const half*>(a), reinterpret_cast<const half*>(b), beta, reinterpret_cast<half*>(c)); } void 
BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float alpha, const float* x, const int incx, float* y, const int incy) { cublas_axpy<float>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy); } void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const double alpha, const double* x, const int incx, double* y, const int incy) { cublas_axpy<double>(ctx->cublas_pmh_handle(), n, &alpha, x, incx, y, incy); } void BlasIf<DeviceType::kGPU>::Axpy(DeviceCtx* ctx, const int n, const float16 alpha, const float16* x, const int incx, float16* y, const int incy) { if (incx == 1 && incy == 1) { AxpyHalf2Gpu<<<BlocksNum4ThreadsNum(n / 2), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, float16_2half(alpha), reinterpret_cast<const half*>(x), reinterpret_cast<half*>(y)); } else { AxpyHalfGpu<<<BlocksNum4ThreadsNum(n), kCudaThreadsNumPerBlock, 0, ctx->cuda_stream()>>>( n, float16_2half(alpha), reinterpret_cast<const half*>(x), incx, reinterpret_cast<half*>(y), incy); } } } // namespace oneflow
19b91bc9c43c0523cbf7e73d0208e23a0c9bdb9d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void _kgauss32d(int nx, int ns, int nd, float *x, float *s, float *k, float g)
{
    int i, j, n, xj, sj;
    double d, dd;
    i = threadIdx.x + blockIdx.x * blockDim.x;
    n = nx*ns;
    while (i < n) {
        xj = (i % nx)*nd;
        sj = (i / nx)*nd;
        dd = 0;
        for (j = 0; j < nd; j++) {
            d = x[xj++]-s[sj++];
            dd += d*d;
        }
        k[i] = exp(-g * dd);
        i += blockDim.x * gridDim.x;
    }
}
19b91bc9c43c0523cbf7e73d0208e23a0c9bdb9d.cu
#include "includes.h"

__global__ void _kgauss32d(int nx, int ns, int nd, float *x, float *s, float *k, float g)
{
    int i, j, n, xj, sj;
    double d, dd;
    i = threadIdx.x + blockIdx.x * blockDim.x;
    n = nx*ns;
    while (i < n) {
        xj = (i % nx)*nd;
        sj = (i / nx)*nd;
        dd = 0;
        for (j = 0; j < nd; j++) {
            d = x[xj++]-s[sj++];
            dd += d*d;
        }
        k[i] = exp(-g * dd);
        i += blockDim.x * gridDim.x;
    }
}
c898b244be2e920e19db8a7424450b594622d3dc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cmath>
#include <cstdio>
#include "../common/cpu_bitmap.h"

#define INF 2e10f
#define DIM 1024
#define NSPHERES 20

struct Sphere {
    float x, y, z;
    float r, g, b;
    float radius;
    Sphere() {};
    __device__ float hit(int ox, int oy, float* n) const {
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            float dz = sqrtf(radius*radius - dx*dx - dy*dy);
            *n = dz/radius;
            return dz + z;
        }
        return -INF;
    }
};

__constant__ Sphere dev_spheres[NSPHERES];

__global__ void kernel(unsigned char* bitmap) {
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int offset = x + y*blockDim.x*gridDim.x;
    float ox = x - DIM/2;
    float oy = y - DIM/2;
    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < NSPHERES; i++) {
        float n;
        float z = dev_spheres[i].hit(ox, oy, &n);
        if (z > maxz) {
            r = dev_spheres[i].r*n;
            g = dev_spheres[i].g*n;
            b = dev_spheres[i].b*n;
            maxz = z;
        }
    }
    bitmap[offset*4 + 0] = (int)(r*255);
    bitmap[offset*4 + 1] = (int)(g*255);
    bitmap[offset*4 + 2] = (int)(b*255);
    bitmap[offset*4 + 3] = 255;
}

#define rnd(x) (x*rand() / RAND_MAX)

int main() {
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);

    CPUBitmap bitmap(DIM, DIM);
    unsigned char* dev_bitmap;
    hipMalloc((void**)&dev_bitmap, bitmap.image_size());

    Sphere* spheres = (Sphere*)malloc(NSPHERES*sizeof(Sphere));
    for (int i = 0; i < NSPHERES; i++) {
        spheres[i].r = rnd(1.0f);
        spheres[i].g = rnd(1.0f);
        spheres[i].b = rnd(1.0f);
        spheres[i].x = rnd(1000.0f) - 500;
        spheres[i].y = rnd(1000.0f) - 500;
        spheres[i].z = rnd(1000.0f) - 500;
        spheres[i].radius = rnd(100.0f) + 20;
    }
    hipMemcpyToSymbol(dev_spheres, spheres, NSPHERES*sizeof(Sphere));

    dim3 gridDim(DIM/16, DIM/16);
    dim3 blockDim(16, 16);
    hipLaunchKernelGGL(( kernel), dim3(gridDim), dim3(blockDim), 0, 0, dev_bitmap);

    hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), hipMemcpyDeviceToHost);

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("Time used: %.2fms\n", elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);

    bitmap.display_and_exit();

    free(spheres);
    hipFree(dev_bitmap);
    return 0;
}
c898b244be2e920e19db8a7424450b594622d3dc.cu
#include <cmath>
#include <cstdio>
#include "../common/cpu_bitmap.h"

#define INF 2e10f
#define DIM 1024
#define NSPHERES 20

struct Sphere {
    float x, y, z;
    float r, g, b;
    float radius;
    Sphere() {};
    __device__ float hit(int ox, int oy, float* n) const {
        float dx = ox - x;
        float dy = oy - y;
        if (dx*dx + dy*dy < radius*radius) {
            float dz = sqrtf(radius*radius - dx*dx - dy*dy);
            *n = dz/radius;
            return dz + z;
        }
        return -INF;
    }
};

__constant__ Sphere dev_spheres[NSPHERES];

__global__ void kernel(unsigned char* bitmap) {
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    int offset = x + y*blockDim.x*gridDim.x;
    float ox = x - DIM/2;
    float oy = y - DIM/2;
    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < NSPHERES; i++) {
        float n;
        float z = dev_spheres[i].hit(ox, oy, &n);
        if (z > maxz) {
            r = dev_spheres[i].r*n;
            g = dev_spheres[i].g*n;
            b = dev_spheres[i].b*n;
            maxz = z;
        }
    }
    bitmap[offset*4 + 0] = (int)(r*255);
    bitmap[offset*4 + 1] = (int)(g*255);
    bitmap[offset*4 + 2] = (int)(b*255);
    bitmap[offset*4 + 3] = 255;
}

#define rnd(x) (x*rand() / RAND_MAX)

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    CPUBitmap bitmap(DIM, DIM);
    unsigned char* dev_bitmap;
    cudaMalloc((void**)&dev_bitmap, bitmap.image_size());

    Sphere* spheres = (Sphere*)malloc(NSPHERES*sizeof(Sphere));
    for (int i = 0; i < NSPHERES; i++) {
        spheres[i].r = rnd(1.0f);
        spheres[i].g = rnd(1.0f);
        spheres[i].b = rnd(1.0f);
        spheres[i].x = rnd(1000.0f) - 500;
        spheres[i].y = rnd(1000.0f) - 500;
        spheres[i].z = rnd(1000.0f) - 500;
        spheres[i].radius = rnd(100.0f) + 20;
    }
    cudaMemcpyToSymbol(dev_spheres, spheres, NSPHERES*sizeof(Sphere));

    dim3 gridDim(DIM/16, DIM/16);
    dim3 blockDim(16, 16);
    kernel<<<gridDim, blockDim>>>(dev_bitmap);

    cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(), cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time used: %.2fms\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    bitmap.display_and_exit();

    free(spheres);
    cudaFree(dev_bitmap);
    return 0;
}
ef1d7bb535ee6e7885eab30132cfc4e6112d4c22.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Cuda kernel wrapper to call nvcomp on bslz4 data
   Jon Wright, ESRF, 2021.
   Use python to cat the nvcomp_extract.cu on the top of this file.
 */

inline __device__ uint32_t read32be( const uint8_t* address ) {
    return ( (uint32_t)(255 & (address)[0]) << 24 |
             (uint32_t)(255 & (address)[1]) << 16 |
             (uint32_t)(255 & (address)[2]) << 8  |
             (uint32_t)(255 & (address)[3]) ) ;
}

__global__ void h5lz4dc (
    const uint8_t*  const compressed,    /* compressed data pointer */
    const uint32_t* const block_starts,  /* block start positions in compressed (bytes) */
    const uint32_t  num_blocks,          /* number of blocks */
    const uint32_t  blocksize,           /* blocksize in bytes */
    uint8_t* const  decompressed         /* destination start pointer */
) {
    const int blockid = blockIdx.x * blockDim.y + threadIdx.y;
    // Defined in nvcomp_extract
    __shared__ uint8_t buffer[DECOMP_INPUT_BUFFER_SIZE * DECOMP_CHUNKS_PER_BLOCK];
    if (blockid < num_blocks) {
        decompressStream(
            buffer + threadIdx.y * DECOMP_INPUT_BUFFER_SIZE,
            decompressed + blockid * blocksize,             // output start
            compressed + block_starts[blockid] + 4,         // input starts
            read32be( compressed + block_starts[blockid] )  // number of compressed bytes
        );
    }
}
ef1d7bb535ee6e7885eab30132cfc4e6112d4c22.cu
/* Cuda kernel wrapper to call nvcomp on bslz4 data
   Jon Wright, ESRF, 2021.
   Use python to cat the nvcomp_extract.cu on the top of this file.
 */

inline __device__ uint32_t read32be( const uint8_t* address ) {
    return ( (uint32_t)(255 & (address)[0]) << 24 |
             (uint32_t)(255 & (address)[1]) << 16 |
             (uint32_t)(255 & (address)[2]) << 8  |
             (uint32_t)(255 & (address)[3]) ) ;
}

__global__ void h5lz4dc (
    const uint8_t*  const compressed,    /* compressed data pointer */
    const uint32_t* const block_starts,  /* block start positions in compressed (bytes) */
    const uint32_t  num_blocks,          /* number of blocks */
    const uint32_t  blocksize,           /* blocksize in bytes */
    uint8_t* const  decompressed         /* destination start pointer */
) {
    const int blockid = blockIdx.x * blockDim.y + threadIdx.y;
    // Defined in nvcomp_extract
    __shared__ uint8_t buffer[DECOMP_INPUT_BUFFER_SIZE * DECOMP_CHUNKS_PER_BLOCK];
    if (blockid < num_blocks) {
        decompressStream(
            buffer + threadIdx.y * DECOMP_INPUT_BUFFER_SIZE,
            decompressed + blockid * blocksize,             // output start
            compressed + block_starts[blockid] + 4,         // input starts
            read32be( compressed + block_starts[blockid] )  // number of compressed bytes
        );
    }
}
808aa9c37ce334f0a7c8d6e2875a5304bafd9349.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "yMaxDeltaIntegralFracKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *intData = NULL;
            hipMalloc(&intData, XSIZE*YSIZE);
            float *tmpArray = NULL;
            hipMalloc(&tmpArray, XSIZE*YSIZE);
            const int nWindows = 1;
            const int h = 1;
            const int w = 1;
            const float *xMin = NULL;
            hipMalloc(&xMin, XSIZE*YSIZE);
            const float *xMax = NULL;
            hipMalloc(&xMax, XSIZE*YSIZE);
            const float *yMax = NULL;
            hipMalloc(&yMax, XSIZE*YSIZE);
            const float *inData = NULL;
            hipMalloc(&inData, XSIZE*YSIZE);
            const int inDataStrideRow = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( yMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                hipLaunchKernelGGL(( yMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( yMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
808aa9c37ce334f0a7c8d6e2875a5304bafd9349.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "yMaxDeltaIntegralFracKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char* p;
    int matrix_len=strtol(argv[1], &p, 10);
    for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
        for(int block_looper=0;block_looper<20;block_looper++){
            int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
            const float *intData = NULL;
            cudaMalloc(&intData, XSIZE*YSIZE);
            float *tmpArray = NULL;
            cudaMalloc(&tmpArray, XSIZE*YSIZE);
            const int nWindows = 1;
            const int h = 1;
            const int w = 1;
            const float *xMin = NULL;
            cudaMalloc(&xMin, XSIZE*YSIZE);
            const float *xMax = NULL;
            cudaMalloc(&xMax, XSIZE*YSIZE);
            const float *yMax = NULL;
            cudaMalloc(&yMax, XSIZE*YSIZE);
            const float *inData = NULL;
            cudaMalloc(&inData, XSIZE*YSIZE);
            const int inDataStrideRow = 1;
            int iXSIZE= XSIZE;
            int iYSIZE= YSIZE;
            while(iXSIZE%BLOCKX!=0) { iXSIZE++; }
            while(iYSIZE%BLOCKY!=0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            yMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
                yMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            }
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                yMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,tmpArray,nWindows,h,w,xMin,xMax,yMax,inData,inDataStrideRow);
            }
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
        }
    }
}
4a5b7633f6441f43c372c65efc557ba9e76e16f4.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "definitions.cuh" #include <time.h> #include "stdio.h" //Number of elements on which to perform CFD unsigned int Ni = 512; // Y elements unsigned int Nj = 512; // X elements unsigned int nIterations = 10000; // No Of Iterations unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2) int main(int argc, char** argv) { //Variables for Timing float cpuTime, gpuTime; // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) float *t = NULL, *t_prev = NULL; float *d_t = NULL,*d_t_prev= NULL; parseCommandLineArguments(argc, (char **)argv); printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); unsigned int size = Ni * Nj * sizeof(float); if(!initializeCPU(&t, &t_prev) ) { printf("\n Error in allocating memory on CPU!!!"); unInitializeCPU(&t, &t_prev); getchar(); return 0; } if (!initializeGPU(&d_t, &d_t_prev)) { printf("\n Error in allocating memory on GPU!!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); return 0; } //Perform CFD on CPU performCPUCFD(t,t_prev, &cpuTime); // To temporarily store CPU data. This is just for comparing with GPU output float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); memcpy(tempBuffer, t_prev, size); //Perform CFD on GPU if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) { printf("\n GPU Kernel failed !!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); return 0; } printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); printf("\n Finished Processing!!!"); getchar(); } void parseCommandLineArguments(int argc, char**argv) { if (argc >= 1) { for (int i=1; i < argc; i++) { int bFirstArgIsParam = false; int string_start = 0; while (argv[i][string_start] == '-') string_start++; char *string_argv = &argv[i][string_start]; if (!STRNCASECMP(string_argv, "Ni=", 3)) { bFirstArgIsParam = true; Ni = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "Nj=", 3)) { bFirstArgIsParam = true; Nj = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "iterations=", 11)) { bFirstArgIsParam = true; nIterations = atoi(&string_argv[11]); continue; } if (!STRNCASECMP(string_argv, "kernel=", 7)) { bFirstArgIsParam = true; kernelVersion = atoi(&string_argv[7]); continue; } if (!bFirstArgIsParam) { printf("Invalid arguments\n"); for (int n=0; n < argc; n++) { printf("argv[%d] = %s\n", n, argv[n]); } printf("\n"); exit(0); } } } if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) { fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); getchar(); exit(0); } } int initializeCPU(float **t, float **t_prev) { *t = (float*) calloc(Ni*Nj, sizeof(float)); *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); if((*t)==NULL || (*t_prev) == NULL) return 0; else return 1; } void unInitializeCPU(float **t, float **t_prev) { if((*t) !=NULL) free(*t); if((*t_prev) != NULL) free(*t_prev); } int initializeGPU(float **d_t, float **d_t_prev) { unsigned int size = Ni * Nj * sizeof(float); // Choose which GPU to run on, change this on a multi-GPU system. 
hipError_t cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); getchar(); return 0; } // Allocate GPU buffers. cudaStatus = hipMalloc((void**)&(*d_t), size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); return 0; } // Allocate GPU buffers . cudaStatus = hipMalloc((void**)&(*d_t_prev), size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = hipMemset((*d_t),0, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = hipMemset((*d_t_prev),0, size); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemset failed!"); getchar(); return 0; } return 1; } void unInitializeGPU(float **d_t, float **d_t_prev) { hipError_t cudaStatus; if((*d_t)!=NULL) cudaStatus = hipFree((*d_t)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipFree failed!"); return; } if((*d_t_prev)!=NULL) cudaStatus = hipFree((*d_t_prev)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipFree failed!"); return; } cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); getchar(); return; } } void performCPUCFD(float *t, float *t_prev, float *cpuTime) { float h,x,y; h = 1.0f/(Ni-1); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } float elapsedTimeInMs = 0.0f; clock_t start = clock(); for(unsigned int k=0;k<nIterations;k++) { for(unsigned int j=1;j<(Nj-1);j++) { for(unsigned int i=1;i<(Ni-1);i++) { t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] + t_prev[i*Nj+(j+1)] - 4*h*h); } } float* pingPong = t_prev; t_prev = t; t = pingPong; } clock_t end = clock(); elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC); printf("\n CPU Time:: %f ms", elapsedTimeInMs); *cpuTime = elapsedTimeInMs; } int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime) { float h,x,y; const char *str = (char*) malloc(1024); // To store error string //Decide how many blocks per thread and how many blocks per grid dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y); dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y); h = 1.0f/(Ni-1); memset(t_prev, 0, sizeof(float) * Ni * Nj); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } //Copy data to device hipMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , hipMemcpyHostToDevice); //Insert event to calculate time hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //This calls Version 1 of kernel which uses Global memory if(kernelVersion ==1) { hipEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. 
hipLaunchKernelGGL(( calculateCFD_V1), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } hipEventRecord(stop, 0); hipEventSynchronize(stop); } //This calls Version 2 of kernel which uses optimization by copying data to shared memory else if(kernelVersion ==2) { hipEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( calculateCFD_V2), dim3(dimGrid),dim3(dimBlock), 0, 0, d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } hipEventRecord(stop, 0); hipEventSynchronize(stop); } float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("\n GPU Time:: %f ms", elapsedTime); *gpuTime = elapsedTime; hipError_t cudaStatus = hipMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); str = hipGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } return 1; } int checkHostEqualsDevice(float* o_host, float* o_device) { int flag =1; float tolerance = 0.0001f; //Compare the results for(unsigned int j=0;j<Nj;j++) { for(unsigned int i=0;i<Ni;i++) { if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) { printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); flag =0; //getchar(); } } } return flag; }
4a5b7633f6441f43c372c65efc557ba9e76e16f4.cu
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "definitions.cuh" #include <time.h> #include "stdio.h" //Number of elements on which to perform CFD unsigned int Ni = 512; // Y elements unsigned int Nj = 512; // X elements unsigned int nIterations = 10000; // No Of Iterations unsigned int kernelVersion =2; // Decides which GPU kernel version to call (Set it to 1 or 2) int main(int argc, char** argv) { //Variables for Timing float cpuTime, gpuTime; // CPU and GPU Pointers ( d_XX : refers to pointer pointing to GPU memory. This is just a convention) float *t = NULL, *t_prev = NULL; float *d_t = NULL,*d_t_prev= NULL; parseCommandLineArguments(argc, (char **)argv); printf("\n Ni= %d, Nj=%d nIteration=%d",Ni,Nj,nIterations); unsigned int size = Ni * Nj * sizeof(float); if(!initializeCPU(&t, &t_prev) ) { printf("\n Error in allocating memory on CPU!!!"); unInitializeCPU(&t, &t_prev); getchar(); return 0; } if (!initializeGPU(&d_t, &d_t_prev)) { printf("\n Error in allocating memory on GPU!!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); return 0; } //Perform CFD on CPU performCPUCFD(t,t_prev, &cpuTime); // To temporarily store CPU data. This is just for comparing with GPU output float *tempBuffer = (float*) calloc(Ni*Nj, sizeof(float)); memcpy(tempBuffer, t_prev, size); //Perform CFD on GPU if(!performGPUCFD(d_t,d_t_prev, t, t_prev, &gpuTime)) { printf("\n GPU Kernel failed !!!"); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); return 0; } printf("\n Is host equal to device = %d", checkHostEqualsDevice(tempBuffer,t)); printf("\n Speedup = %fx", (float)(cpuTime/gpuTime)); unInitializeCPU(&t, &t_prev); unInitializeGPU(&d_t, &d_t_prev); if(tempBuffer !=NULL) free(tempBuffer); printf("\n Finished Processing!!!"); getchar(); } void parseCommandLineArguments(int argc, char**argv) { if (argc >= 1) { for (int i=1; i < argc; i++) { int bFirstArgIsParam = false; int string_start = 0; while (argv[i][string_start] == '-') string_start++; char *string_argv = &argv[i][string_start]; if (!STRNCASECMP(string_argv, "Ni=", 3)) { bFirstArgIsParam = true; Ni = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "Nj=", 3)) { bFirstArgIsParam = true; Nj = atoi(&string_argv[3]); continue; } if (!STRNCASECMP(string_argv, "iterations=", 11)) { bFirstArgIsParam = true; nIterations = atoi(&string_argv[11]); continue; } if (!STRNCASECMP(string_argv, "kernel=", 7)) { bFirstArgIsParam = true; kernelVersion = atoi(&string_argv[7]); continue; } if (!bFirstArgIsParam) { printf("Invalid arguments\n"); for (int n=0; n < argc; n++) { printf("argv[%d] = %s\n", n, argv[n]); } printf("\n"); exit(0); } } } if(( Ni % THREADS_PER_BLOCK_Y != 0) || (Nj % THREADS_PER_BLOCK_X != 0)) { fprintf(stderr, "Please specify Ni & Nj as multiple of 16 !!!!"); getchar(); exit(0); } } int initializeCPU(float **t, float **t_prev) { *t = (float*) calloc(Ni*Nj, sizeof(float)); *t_prev = (float*) calloc(Ni*Nj, sizeof(float)); if((*t)==NULL || (*t_prev) == NULL) return 0; else return 1; } void unInitializeCPU(float **t, float **t_prev) { if((*t) !=NULL) free(*t); if((*t_prev) != NULL) free(*t_prev); } int initializeGPU(float **d_t, float **d_t_prev) { unsigned int size = Ni * Nj * sizeof(float); // Choose which GPU to run on, change this on a multi-GPU system. cudaError_t cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! 
Do you have a CUDA-capable GPU installed?"); getchar(); return 0; } // Allocate GPU buffers. cudaStatus = cudaMalloc((void**)&(*d_t), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Allocate GPU buffers . cudaStatus = cudaMalloc((void**)&(*d_t_prev), size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } // Memset GPU buffers cudaStatus = cudaMemset((*d_t_prev),0, size); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemset failed!"); getchar(); return 0; } return 1; } void unInitializeGPU(float **d_t, float **d_t_prev) { cudaError_t cudaStatus; if((*d_t)!=NULL) cudaStatus = cudaFree((*d_t)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } if((*d_t_prev)!=NULL) cudaStatus = cudaFree((*d_t_prev)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaFree failed!"); return; } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); getchar(); return; } } void performCPUCFD(float *t, float *t_prev, float *cpuTime) { float h,x,y; h = 1.0f/(Ni-1); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } float elapsedTimeInMs = 0.0f; clock_t start = clock(); for(unsigned int k=0;k<nIterations;k++) { for(unsigned int j=1;j<(Nj-1);j++) { for(unsigned int i=1;i<(Ni-1);i++) { t[i*Nj+j] = 0.25f * (t_prev[(i-1)*Nj+j] + t_prev[(i+1)*Nj+j] + t_prev[i*Nj+(j-1)] + t_prev[i*Nj+(j+1)] - 4*h*h); } } float* pingPong = t_prev; t_prev = t; t = pingPong; } clock_t end = clock(); elapsedTimeInMs = (float)((end - start) * 1000 / CLOCKS_PER_SEC); printf("\n CPU Time:: %f ms", elapsedTimeInMs); *cpuTime = elapsedTimeInMs; } int performGPUCFD(float *d_t, float *d_t_prev, float *t, float *t_prev, float*gpuTime) { float h,x,y; const char *str = (char*) malloc(1024); // To store error string //Decide how many blocks per thread and how many blocks per grid dim3 dimBlock(THREADS_PER_BLOCK_X,THREADS_PER_BLOCK_Y); dim3 dimGrid(Nj/dimBlock.x,Ni/dimBlock.y); h = 1.0f/(Ni-1); memset(t_prev, 0, sizeof(float) * Ni * Nj); for(unsigned int i=0;i<Ni;i++) { x = i*h; t_prev[i*Nj+0] = x*x; t_prev[i*Nj+(Nj-1)] = x*x + 1.0f; } for(unsigned int j=0;j < Nj; j++) { y = j*h; t_prev[0*Nj+j] = y*y; t_prev[((Ni-1) * Nj) + j] = 1.0f + y*y; } //Copy data to device cudaMemcpy(d_t_prev, t_prev, sizeof(float) * Ni * Nj , cudaMemcpyHostToDevice); //Insert event to calculate time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //This calls Version 1 of kernel which uses Global memory if(kernelVersion ==1) { cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. calculateCFD_V1<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } //This calls Version 2 of kernel which uses optimization by copying data to shared memory else if(kernelVersion ==2) { cudaEventRecord(start, 0); for(unsigned int k=0;k<nIterations;k++) { // Launch a kernel on the GPU with one thread for each element. 
calculateCFD_V2<<<dimGrid,dimBlock>>>(d_t_prev,d_t, Ni, Nj, h); float* pingPong = d_t_prev; d_t_prev = d_t; d_t = pingPong; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); } float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("\n GPU Time:: %f ms", elapsedTime); *gpuTime = elapsedTime; cudaError_t cudaStatus = cudaMemcpy(t, d_t_prev, sizeof(float) * Ni * Nj , cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); str = cudaGetErrorString(cudaStatus); fprintf(stderr, "CUDA Error!:: %s\n", str); getchar(); return 0; } return 1; } int checkHostEqualsDevice(float* o_host, float* o_device) { int flag =1; float tolerance = 0.0001f; //Compare the results for(unsigned int j=0;j<Nj;j++) { for(unsigned int i=0;i<Ni;i++) { if( (o_host[i*Nj+j] - o_device[i*Nj+j]) >= tolerance || (o_host[i*Nj+j] - o_device[i*Nj+j]) <= -tolerance) { printf("\n D=[%f]!=H=[%f] since Diff > tol %f for [%d][%d]",o_device[i*Nj+j], o_host[i*Nj+j],tolerance, i, j); flag =0; //getchar(); } } } return flag; }
554dbb90b66aa4138888c322b16aeb1654c3c0d3.hip
// !!! This is a file automatically generated by hipify!!! /* * * nbody.cu * * N-body example that illustrates gravitational simulation. * This is the type of computation that GPUs excel at: * parallelizable, with lots of FLOPS per unit of external * memory bandwidth required. * * Build with: nvcc -I ../chLib nbody.cu nbody_CPU_SSE.cpp nbody_CPU_AOS.cpp nbody_CPU_AOS_tiled.cpp nbody_CPU_SSE_threaded.cpp nbody_CPU_SOA.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu * On Linux: nvcc -I ../chLib nbody.cu nbody_CPU_SSE.cpp nbody_CPU_AOS.cpp nbody_CPU_AOS_tiled.cpp nbody_CPU_SSE_threaded.cpp nbody_CPU_SOA.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu -lcudart_static -ldl -lrt * Requires: No minimum SM requirement. If SM 3.x is not available, * this application quietly replaces the shuffle and fast-atomic * implementations with the shared memory implementation. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> // for kbhit() #include <ch_conio.h> #include <math.h> #include <chCommandLine.h> #include <chError.h> #include <chThread.h> #include <chTimer.h> #include "nbody.h" #include "bodybodyInteraction.cuh" using namespace cudahandbook::threading; inline void randomVector( float v[3] ) { float lenSqr; do { v[0] = rand() / (float) RAND_MAX * 2 - 1; v[1] = rand() / (float) RAND_MAX * 2 - 1; v[2] = rand() / (float) RAND_MAX * 2 - 1; lenSqr = v[0]*v[0]+v[1]*v[1]+v[2]*v[2]; } while ( lenSqr > 1.0f ); } void randomUnitBodies( float *pos, float *vel, size_t N ) { for ( size_t i = 0; i < N; i++ ) { randomVector( &pos[4*i] ); randomVector( &vel[4*i] ); pos[4*i+3] = 1.0f; // unit mass vel[4*i+3] = 1.0f; } } template<typename T> static float relError( float a, float b ) { if ( a == b ) return 0.0f; return fabsf(a-b)/b; } bool g_bCUDAPresent; bool g_bSM30Present; float *g_hostAOS_PosMass; float *g_hostAOS_VelInvMass; float *g_hostAOS_Force; float *g_hostAOS_gpuCrossCheckForce[32]; float *g_dptrAOS_PosMass; float *g_dptrAOS_Force; // // threshold for soft comparisons when validating // that forces add up to 0. 
// double g_ZeroThreshold; bool g_bGPUTest; // Buffer to hold the golden version of the forces, used for comparison // Along with timing results, we report the maximum relative error with // respect to this array. float *g_hostAOS_Force_Golden; float *g_hostSOA_Pos[3]; float *g_hostSOA_Force[3]; float *g_hostSOA_Mass; float *g_hostSOA_InvMass; size_t g_N; float g_softening = 0.1f; float g_damping = 0.995f; float g_dt = 0.016f; template<typename T> static T relError( T a, T b ) { if ( a == b ) return 0.0f; T relErr = (a-b)/b; // Manually take absolute value return (relErr<0.0f) ? -relErr : relErr; } #include "nbody_CPU_AOS.h" #include "nbody_CPU_AOS_tiled.h" #include "nbody_CPU_SOA.h" #include "nbody_CPU_SIMD.h" #ifndef NO_CUDA #include "nbody_GPU_AOS.cuh" #include "nbody_GPU_AOS_const.cuh" #include "nbody_GPU_AOS_tiled.cuh" #include "nbody_GPU_AOS_tiled_const.cuh" //#include "nbody_GPU_SOA_tiled.cuh" #include "nbody_GPU_Shuffle.cuh" #include "nbody_GPU_Atomic_hip.cuh" #endif #include "kahan.h" void integrateGravitation_AOS( float *ppos, float *pvel, float *pforce, float dt, float damping, size_t N ) { for ( size_t i = 0; i < N; i++ ) { int index = 4*i; int indexForce = 3*i; float pos[3], vel[3], force[3]; pos[0] = ppos[index+0]; pos[1] = ppos[index+1]; pos[2] = ppos[index+2]; float invMass = pvel[index+3]; vel[0] = pvel[index+0]; vel[1] = pvel[index+1]; vel[2] = pvel[index+2]; force[0] = pforce[indexForce+0]; force[1] = pforce[indexForce+1]; force[2] = pforce[indexForce+2]; // acceleration = force / mass; // new velocity = old velocity + acceleration * deltaTime vel[0] += (force[0] * invMass) * dt; vel[1] += (force[1] * invMass) * dt; vel[2] += (force[2] * invMass) * dt; vel[0] *= damping; vel[1] *= damping; vel[2] *= damping; // new position = old position + velocity * deltaTime pos[0] += vel[0] * dt; pos[1] += vel[1] * dt; pos[2] += vel[2] * dt; ppos[index+0] = pos[0]; ppos[index+1] = pos[1]; ppos[index+2] = pos[2]; pvel[index+0] = vel[0]; pvel[index+1] = vel[1]; pvel[index+2] = vel[2]; } } enum nbodyAlgorithm_enum g_Algorithm; // // g_maxAlgorithm is used to determine when to rotate g_Algorithm back to CPU_AOS // If CUDA is present, it is CPU_SIMD_threaded, otherwise it depends on SM version // // The shuffle and tiled implementations are SM 3.0 only. // // The CPU and GPU algorithms must be contiguous, and the logic in main() to // initialize this value must be modified if any new algorithms are added. 
// enum nbodyAlgorithm_enum g_maxAlgorithm; bool g_bCrossCheck = true; bool g_bUseSIMDForCrossCheck = true; bool g_bNoCPU = false; bool g_bGPUCrossCheck = false; bool g_bGPUCrossCheckFile = false; FILE *g_fGPUCrosscheckInput; FILE *g_fGPUCrosscheckOutput; bool ComputeGravitation( float *ms, float *maxRelError, nbodyAlgorithm_enum algorithm, bool bCrossCheck ) { hipError_t status; bool bSOA = false; // AOS -> SOA data structures in case we are measuring SOA performance for ( size_t i = 0; i < g_N; i++ ) { g_hostSOA_Pos[0][i] = g_hostAOS_PosMass[4*i+0]; g_hostSOA_Pos[1][i] = g_hostAOS_PosMass[4*i+1]; g_hostSOA_Pos[2][i] = g_hostAOS_PosMass[4*i+2]; g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3]; g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i]; } if ( bCrossCheck ) { #ifdef HAVE_SIMD_THREADED if ( g_bUseSIMDForCrossCheck ) { ComputeGravitation_SIMD_threaded( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); for ( size_t i = 0; i < g_N; i++ ) { g_hostAOS_Force_Golden[3*i+0] = g_hostSOA_Force[0][i]; g_hostAOS_Force_Golden[3*i+1] = g_hostSOA_Force[1][i]; g_hostAOS_Force_Golden[3*i+2] = g_hostSOA_Force[2][i]; } } else { #endif ComputeGravitation_AOS( g_hostAOS_Force_Golden, g_hostAOS_PosMass, g_softening*g_softening, g_N ); #ifdef HAVE_SIMD_THREADED } #endif } // CPU->GPU copies in case we are measuring GPU performance if ( g_bCUDAPresent ) { cuda(MemcpyAsync( g_dptrAOS_PosMass, g_hostAOS_PosMass, 4*g_N*sizeof(float), hipMemcpyHostToDevice ) ); } switch ( algorithm ) { case CPU_AOS: *ms = ComputeGravitation_AOS( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case CPU_AOS_tiled: *ms = ComputeGravitation_AOS_tiled( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case CPU_SOA: *ms = ComputeGravitation_SOA( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #ifdef HAVE_SIMD case CPU_SIMD: *ms = ComputeGravitation_SIMD( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifdef HAVE_SIMD_THREADED case CPU_SIMD_threaded: *ms = ComputeGravitation_SIMD_threaded( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifdef HAVE_SIMD_OPENMP case CPU_SIMD_openmp: *ms = ComputeGravitation_SIMD_openmp( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifndef NO_CUDA case GPU_AOS: *ms = ComputeGravitation_GPU_AOS( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; case GPU_AOS_tiled: *ms = ComputeGravitation_GPU_AOS_tiled( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; case GPU_AOS_tiled_const: *ms = ComputeGravitation_GPU_AOS_tiled_const( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; #if 0 // commented out - too slow even on SM 3.0 case GPU_Atomic: cuda(Memset( g_dptrAOS_Force, 0, 3*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Atomic( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; #endif case GPU_Shared: cuda(Memset( g_dptrAOS_Force, 
0, 3*g_N*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Shared( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; case GPU_Const: cuda(Memset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) ); *ms = ComputeNBodyGravitation_GPU_AOS_const( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; case GPU_Shuffle: cuda(Memset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Shuffle( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), hipMemcpyDeviceToHost ) ); break; case multiGPU_SingleCPUThread: memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) ); *ms = ComputeGravitation_multiGPU_singlethread( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case multiGPU_MultiCPUThread: memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) ); *ms = ComputeGravitation_multiGPU_threaded( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; #endif default: fprintf(stderr, "Unrecognized algorithm index: %d\n", algorithm); abort(); break; } if ( g_bGPUCrossCheck ) { int cDisagreements = 0; for ( int i = 0; i < g_numGPUs; i++ ) { for ( int j = 1; j < g_numGPUs; j++ ) { if ( memcmp( g_hostAOS_gpuCrossCheckForce[i], g_hostAOS_gpuCrossCheckForce[j], 3*g_N*sizeof(float) ) ) { fprintf( stderr, "GPU %d and GPU %d disagreed\n", i, j ); cDisagreements += 1; } } } if ( cDisagreements ) { goto Error; } } // SOA -> AOS if ( bSOA ) { for ( size_t i = 0; i < g_N; i++ ) { g_hostAOS_Force[3*i+0] = g_hostSOA_Force[0][i]; g_hostAOS_Force[3*i+1] = g_hostSOA_Force[1][i]; g_hostAOS_Force[3*i+2] = g_hostSOA_Force[2][i]; } } integrateGravitation_AOS( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_hostAOS_Force, g_dt, g_damping, g_N ); if ( g_bGPUCrossCheck && g_fGPUCrosscheckInput ) { if ( memcmp( g_hostAOS_Force, g_hostAOS_Force_Golden, 3*g_N*sizeof(float) ) ) { printf( "GPU CROSSCHECK FAILURE: Disagreement with golden values\n" ); goto Error; } } *maxRelError = 0.0f; if ( bCrossCheck ) { float max = 0.0f; for ( size_t i = 0; i < 3*g_N; i++ ) { float err = relError( g_hostAOS_Force[i], g_hostAOS_Force_Golden[i] ); if ( err > max ) { max = err; } } *maxRelError = max; } else { KahanAdder sumX; KahanAdder sumY; KahanAdder sumZ; for ( size_t i = 0; i < g_N; i++ ) { sumX += g_hostAOS_Force[i*3+0]; sumY += g_hostAOS_Force[i*3+1]; sumZ += g_hostAOS_Force[i*3+2]; } *maxRelError = ::max( fabs(sumX), ::max(fabs(sumY), fabs(sumZ)) ); if ( g_ZeroThreshold != 0.0 && fabs( *maxRelError ) > g_ZeroThreshold ) { printf( "Maximum sum of forces > threshold (%E > %E)\n", *maxRelError, g_ZeroThreshold ); goto Error; } } return true; Error: return false; } workerThread *g_CPUThreadPool; int g_numCPUCores; workerThread *g_GPUThreadPool; int g_numGPUs; struct gpuInit_struct { int iGPU; hipError_t status; }; void initializeGPU( void *_p ) { hipError_t status; gpuInit_struct *p = (gpuInit_struct *) _p; cuda(SetDevice( p->iGPU ) ); cuda(SetDeviceFlags( hipDeviceMapHost ) ); cuda(Free(0) ); Error: p->status = status; } int main( int argc, char *argv[] ) { hipError_t status; // kiloparticles int kParticles = 4, kMaxIterations = 0; if ( 1 == argc ) { printf( "Usage: nbody --numbodies <N> [--nocpu] [--nocrosscheck] [--iterations <N>]\n" ); printf( " --numbodies is multiplied by 1024 (default 
is 4)\n" ); printf( " By default, the app checks results against a CPU implementation; \n" ); printf( " disable this behavior with --nocrosscheck.\n" ); printf( " The CPU implementation may be disabled with --nocpu.\n" ); printf( " --nocpu implies --nocrosscheck.\n\n" ); printf( " --nosimd uses serial CPU implementation instead of SIMD.\n" ); printf( " --iterations specifies a fixed number of iterations to execute\n"); return 1; } // for reproducible results for a given N srand(7); { g_numCPUCores = processorCount(); g_CPUThreadPool = new workerThread[g_numCPUCores]; for ( size_t i = 0; i < g_numCPUCores; i++ ) { if ( ! g_CPUThreadPool[i].initialize( ) ) { fprintf( stderr, "Error initializing thread pool\n" ); return 1; } } } status = hipGetDeviceCount( &g_numGPUs ); g_bCUDAPresent = (hipSuccess == status) && (g_numGPUs > 0); if ( g_bCUDAPresent ) { hipDeviceProp_t prop; cuda(GetDeviceProperties( &prop, 0 ) ); g_bSM30Present = prop.major >= 3; } g_bNoCPU = chCommandLineGetBool( "nocpu", argc, argv ); if ( g_bNoCPU && ! g_bCUDAPresent ) { printf( "--nocpu specified, but no CUDA present...exiting\n" ); exit(1); } g_bCrossCheck = ! chCommandLineGetBool( "nocrosscheck", argc, argv ); if ( g_bNoCPU ) { g_bCrossCheck = false; } if ( g_bCrossCheck && chCommandLineGetBool( "nosse", argc, argv ) ) { g_bUseSIMDForCrossCheck = false; } chCommandLineGet( &kParticles, "numbodies", argc, argv ); g_N = kParticles*1024; chCommandLineGet( &kMaxIterations, "iterations", argc, argv); // Round down to the nearest multiple of the CPU count (e.g. if we have // a system with a CPU count that isn't a power of two, we need to round) g_N -= g_N % g_numCPUCores; if ( chCommandLineGetBool( "gpu-crosscheck", argc, argv ) ) { g_bGPUCrossCheck = true; } g_bGPUCrossCheck = chCommandLineGetBool( "gpu-crosscheck", argc, argv ); { char *szFilename; if ( chCommandLineGet( &szFilename, "gpu-crosscheck-input-file", argc, argv ) ) { if ( ! g_bGPUCrossCheck ) { fprintf( stderr, "GPU crosscheck input file requires --gpu-crosscheck\n" ); goto Error; } g_fGPUCrosscheckInput = fopen( szFilename, "rb" ); if ( ! g_fGPUCrosscheckInput ) { fprintf( stderr, "Could not open %s for input\n", szFilename ); goto Error; } { int version; if ( 1 != fread( &version, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of version failed\n" ); goto Error; } if ( version != NBODY_GOLDENFILE_VERSION ) { fprintf( stderr, "File version mismatch - generate new golden files!\n" ); goto Error; } } if ( 1 != fread( &g_N, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of particle count failed\n" ); goto Error; } if ( 1 != fread( &kMaxIterations, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of iteration count failed\n" ); goto Error; } printf( "%d iterations specified in input file\n", kMaxIterations ); } if ( chCommandLineGet( &szFilename, "gpu-crosscheck-output-file", argc, argv ) ) { if ( g_fGPUCrosscheckInput ) { fprintf( stderr, "Crosscheck input and output files are mutually exclusive. Please specify only one.\n" ); goto Error; } if ( ! g_bGPUCrossCheck ) { fprintf( stderr, "GPU crosscheck output file requires --gpu-crosscheck\n" ); goto Error; } g_fGPUCrosscheckOutput = fopen( szFilename, "wb" ); if ( ! g_fGPUCrosscheckOutput ) { fprintf( stderr, "Could not open %s for output\n", szFilename ); goto Error; } if ( ! 
kMaxIterations ) { fprintf( stderr, "Must specify --iterations when generating output file for GPU cross check.\n" ); goto Error; } { int version = NBODY_GOLDENFILE_VERSION; if ( 1 != fwrite( &version, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of version failed\n" ); goto Error; } } if ( 1 != fwrite( &g_N, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of particle count failed\n" ); goto Error; } if ( 1 != fwrite( &kMaxIterations, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of iteration count failed\n" ); goto Error; } } } chCommandLineGet( &g_ZeroThreshold, "zero", argc, argv ); if ( g_numGPUs ) { // optionally override GPU count from command line chCommandLineGet( &g_numGPUs, "numgpus", argc, argv ); g_GPUThreadPool = new workerThread[g_numGPUs]; for ( size_t i = 0; i < g_numGPUs; i++ ) { if ( ! g_GPUThreadPool[i].initialize( ) ) { fprintf( stderr, "Error initializing thread pool\n" ); return 1; } } for ( int i = 0; i < g_numGPUs; i++ ) { gpuInit_struct initGPU = {i}; g_GPUThreadPool[i].delegateSynchronous( initializeGPU, &initGPU ); if ( hipSuccess != initGPU.status ) { fprintf( stderr, "Initializing GPU %d failed " " with %d (%s)\n", i, initGPU.status, hipGetErrorString( initGPU.status ) ); return 1; } } } printf( "Running simulation with %d particles, crosscheck %s, CPU %s\n", (int) g_N, g_bCrossCheck ? "enabled" : "disabled", g_bNoCPU ? "disabled" : "enabled" ); #if defined(HAVE_SIMD_OPENMP) g_maxAlgorithm = CPU_SIMD_openmp; #elif defined(HAVE_SIMD_THREADED) g_maxAlgorithm = CPU_SIMD_threaded; #elif defined(HAVE_SIMD) g_maxAlgorithm = CPU_SIMD; #else g_maxAlgorithm = CPU_SOA; #endif g_Algorithm = g_bCUDAPresent ? GPU_AOS : g_maxAlgorithm; g_Algorithm = multiGPU_SingleCPUThread; if ( g_bCUDAPresent || g_bNoCPU ) { // max algorithm is different depending on whether SM 3.0 is present g_maxAlgorithm = g_bSM30Present ? GPU_AOS_tiled_const : multiGPU_MultiCPUThread; } if ( g_bCUDAPresent ) { hipDeviceProp_t propForVersion; cuda(SetDeviceFlags( hipDeviceMapHost ) ); cuda(GetDeviceProperties( &propForVersion, 0 ) ); if ( propForVersion.major < 3 ) { // Only SM 3.x supports shuffle and fast atomics, so we cannot run // some algorithms on this board. 
g_maxAlgorithm = multiGPU_MultiCPUThread; } cuda(HostAlloc( (void **) &g_hostAOS_PosMass, 4*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); for ( int i = 0; i < 3; i++ ) { cuda(HostAlloc( (void **) &g_hostSOA_Pos[i], g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_Force[i], g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); } cuda(HostAlloc( (void **) &g_hostAOS_Force, 3*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(HostAlloc( (void **) &g_hostAOS_Force_Golden, 3*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(HostAlloc( (void **) &g_hostAOS_VelInvMass, 4*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_Mass, g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_InvMass, g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); cuda(Malloc( &g_dptrAOS_PosMass, 4*g_N*sizeof(float) ) ); cuda(Malloc( (void **) &g_dptrAOS_Force, 3*g_N*sizeof(float) ) ); if ( g_bGPUCrossCheck ) { printf( "GPU cross check enabled (%d GPUs), disabling CPU\n", g_numGPUs ); g_bNoCPU = true; g_bCrossCheck = false; if ( g_numGPUs < 2 ) { fprintf( stderr, "GPU cross check enabled, but <2 GPUs available\n" ); goto Error; } for ( int i = 0; i < g_numGPUs; i++ ) { cuda(HostAlloc( (void **) (&g_hostAOS_gpuCrossCheckForce[i]), 3*g_N*sizeof(float), hipHostMallocPortable|hipHostMallocMapped ) ); } } } else { g_hostAOS_PosMass = new float[4*g_N]; for ( int i = 0; i < 3; i++ ) { g_hostSOA_Pos[i] = new float[g_N]; g_hostSOA_Force[i] = new float[g_N]; } g_hostSOA_Mass = new float[g_N]; g_hostAOS_Force = new float[3*g_N]; g_hostAOS_Force_Golden = new float[3*g_N]; g_hostAOS_VelInvMass = new float[4*g_N]; g_hostSOA_Mass = new float[g_N]; g_hostSOA_InvMass = new float[g_N]; } randomUnitBodies( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_N ); for ( size_t i = 0; i < g_N; i++ ) { g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3]; g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i]; } #if 0 // gather performance data over GPU implementations // for different problem sizes. printf( "kBodies\t" ); for ( int algorithm = GPU_AOS; algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]); algorithm++ ) { printf( "%s\t", rgszAlgorithmNames[algorithm] ); } printf( "\n" ); for ( int kBodies = 3; kBodies <= 96; kBodies += 3 ) { g_N = 1024*kBodies; printf( "%d\t", kBodies ); for ( int algorithm = GPU_AOS; algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]); algorithm++ ) { float sum = 0.0f; const int numIterations = 10; for ( int i = 0; i < numIterations; i++ ) { float ms, err; if ( ! ComputeGravitation( &ms, &err, (nbodyAlgorithm_enum) algorithm, g_bCrossCheck ) ) { fprintf( stderr, "Error computing timestep\n" ); exit(1); } sum += ms; } sum /= (float) numIterations; double interactionsPerSecond = (double) g_N*g_N*1000.0f / sum; if ( interactionsPerSecond > 1e9 ) { printf ( "%.2f\t", interactionsPerSecond/1e9 ); } else { printf ( "%.3f\t", interactionsPerSecond/1e9 ); } } printf( "\n" ); } return 0; #endif { int kIterations = 0; bool bStop = false; while ( ! bStop ) { float ms, err; if ( ! ComputeGravitation( &ms, &err, g_Algorithm, g_bCrossCheck ) ) { fprintf( stderr, "Error computing timestep\n" ); exit(1); } double interactionsPerSecond = (double) g_N*g_N*1000.0f / ms; if ( interactionsPerSecond > 1e9 ) { printf ( "\r%s: %8.2f ms = %8.3fx10^9 interactions/s (Rel. 
error: %E)\n", rgszAlgorithmNames[g_Algorithm], ms, interactionsPerSecond/1e9, err ); } else { printf ( "\r%s: %8.2f ms = %8.3fx10^6 interactions/s (Rel. error: %E)\n", rgszAlgorithmNames[g_Algorithm], ms, interactionsPerSecond/1e6, err ); } if (kMaxIterations) { kIterations++; if (kIterations >= kMaxIterations) { bStop = true; } } if ( kbhit() ) { char c = getch(); switch ( c ) { case ' ': if ( g_Algorithm == g_maxAlgorithm ) { g_Algorithm = g_bNoCPU ? GPU_AOS : CPU_AOS; // Skip slow CPU implementations if we are using SIMD for cross-check if ( g_bUseSIMDForCrossCheck ) { #if defined(HAVE_SIMD_THREADED) g_Algorithm = CPU_SIMD_threaded; #elif defined(HAVE_SIMD_OPENMP) g_Algorithm = CPU_SIMD_openmp; #endif } } else { g_Algorithm = (enum nbodyAlgorithm_enum) (g_Algorithm+1); } break; case 'q': case 'Q': bStop = true; break; } } } } if ( g_fGPUCrosscheckInput ) fclose( g_fGPUCrosscheckInput ); if ( g_fGPUCrosscheckOutput ) fclose( g_fGPUCrosscheckOutput ); return 0; Error: if ( g_fGPUCrosscheckInput ) fclose( g_fGPUCrosscheckInput ); if ( g_fGPUCrosscheckOutput ) fclose( g_fGPUCrosscheckOutput ); if ( hipSuccess != status ) { printf( "CUDA Error: %s\n", hipGetErrorString( status ) ); } return 1; }
554dbb90b66aa4138888c322b16aeb1654c3c0d3.cu
/* * * nbody.cu * * N-body example that illustrates gravitational simulation. * This is the type of computation that GPUs excel at: * parallelizable, with lots of FLOPS per unit of external * memory bandwidth required. * * Build with: nvcc -I ../chLib nbody.cu nbody_CPU_SSE.cpp nbody_CPU_AOS.cpp nbody_CPU_AOS_tiled.cpp nbody_CPU_SSE_threaded.cpp nbody_CPU_SOA.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu * On Linux: nvcc -I ../chLib nbody.cu nbody_CPU_SSE.cpp nbody_CPU_AOS.cpp nbody_CPU_AOS_tiled.cpp nbody_CPU_SSE_threaded.cpp nbody_CPU_SOA.cpp nbody_GPU_shared.cu nbody_multiGPU.cu nbody_multiGPU_threaded.cu -lcudart_static -ldl -lrt * Requires: No minimum SM requirement. If SM 3.x is not available, * this application quietly replaces the shuffle and fast-atomic * implementations with the shared memory implementation. * * Copyright (c) 2011-2012, Archaea Software, LLC. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #include <stdio.h> // for kbhit() #include <ch_conio.h> #include <math.h> #include <chCommandLine.h> #include <chError.h> #include <chThread.h> #include <chTimer.h> #include "nbody.h" #include "bodybodyInteraction.cuh" using namespace cudahandbook::threading; inline void randomVector( float v[3] ) { float lenSqr; do { v[0] = rand() / (float) RAND_MAX * 2 - 1; v[1] = rand() / (float) RAND_MAX * 2 - 1; v[2] = rand() / (float) RAND_MAX * 2 - 1; lenSqr = v[0]*v[0]+v[1]*v[1]+v[2]*v[2]; } while ( lenSqr > 1.0f ); } void randomUnitBodies( float *pos, float *vel, size_t N ) { for ( size_t i = 0; i < N; i++ ) { randomVector( &pos[4*i] ); randomVector( &vel[4*i] ); pos[4*i+3] = 1.0f; // unit mass vel[4*i+3] = 1.0f; } } template<typename T> static float relError( float a, float b ) { if ( a == b ) return 0.0f; return fabsf(a-b)/b; } bool g_bCUDAPresent; bool g_bSM30Present; float *g_hostAOS_PosMass; float *g_hostAOS_VelInvMass; float *g_hostAOS_Force; float *g_hostAOS_gpuCrossCheckForce[32]; float *g_dptrAOS_PosMass; float *g_dptrAOS_Force; // // threshold for soft comparisons when validating // that forces add up to 0. // double g_ZeroThreshold; bool g_bGPUTest; // Buffer to hold the golden version of the forces, used for comparison // Along with timing results, we report the maximum relative error with // respect to this array. 
float *g_hostAOS_Force_Golden; float *g_hostSOA_Pos[3]; float *g_hostSOA_Force[3]; float *g_hostSOA_Mass; float *g_hostSOA_InvMass; size_t g_N; float g_softening = 0.1f; float g_damping = 0.995f; float g_dt = 0.016f; template<typename T> static T relError( T a, T b ) { if ( a == b ) return 0.0f; T relErr = (a-b)/b; // Manually take absolute value return (relErr<0.0f) ? -relErr : relErr; } #include "nbody_CPU_AOS.h" #include "nbody_CPU_AOS_tiled.h" #include "nbody_CPU_SOA.h" #include "nbody_CPU_SIMD.h" #ifndef NO_CUDA #include "nbody_GPU_AOS.cuh" #include "nbody_GPU_AOS_const.cuh" #include "nbody_GPU_AOS_tiled.cuh" #include "nbody_GPU_AOS_tiled_const.cuh" //#include "nbody_GPU_SOA_tiled.cuh" #include "nbody_GPU_Shuffle.cuh" #include "nbody_GPU_Atomic.cuh" #endif #include "kahan.h" void integrateGravitation_AOS( float *ppos, float *pvel, float *pforce, float dt, float damping, size_t N ) { for ( size_t i = 0; i < N; i++ ) { int index = 4*i; int indexForce = 3*i; float pos[3], vel[3], force[3]; pos[0] = ppos[index+0]; pos[1] = ppos[index+1]; pos[2] = ppos[index+2]; float invMass = pvel[index+3]; vel[0] = pvel[index+0]; vel[1] = pvel[index+1]; vel[2] = pvel[index+2]; force[0] = pforce[indexForce+0]; force[1] = pforce[indexForce+1]; force[2] = pforce[indexForce+2]; // acceleration = force / mass; // new velocity = old velocity + acceleration * deltaTime vel[0] += (force[0] * invMass) * dt; vel[1] += (force[1] * invMass) * dt; vel[2] += (force[2] * invMass) * dt; vel[0] *= damping; vel[1] *= damping; vel[2] *= damping; // new position = old position + velocity * deltaTime pos[0] += vel[0] * dt; pos[1] += vel[1] * dt; pos[2] += vel[2] * dt; ppos[index+0] = pos[0]; ppos[index+1] = pos[1]; ppos[index+2] = pos[2]; pvel[index+0] = vel[0]; pvel[index+1] = vel[1]; pvel[index+2] = vel[2]; } } enum nbodyAlgorithm_enum g_Algorithm; // // g_maxAlgorithm is used to determine when to rotate g_Algorithm back to CPU_AOS // If CUDA is present, it is CPU_SIMD_threaded, otherwise it depends on SM version // // The shuffle and tiled implementations are SM 3.0 only. // // The CPU and GPU algorithms must be contiguous, and the logic in main() to // initialize this value must be modified if any new algorithms are added. 
// enum nbodyAlgorithm_enum g_maxAlgorithm; bool g_bCrossCheck = true; bool g_bUseSIMDForCrossCheck = true; bool g_bNoCPU = false; bool g_bGPUCrossCheck = false; bool g_bGPUCrossCheckFile = false; FILE *g_fGPUCrosscheckInput; FILE *g_fGPUCrosscheckOutput; bool ComputeGravitation( float *ms, float *maxRelError, nbodyAlgorithm_enum algorithm, bool bCrossCheck ) { cudaError_t status; bool bSOA = false; // AOS -> SOA data structures in case we are measuring SOA performance for ( size_t i = 0; i < g_N; i++ ) { g_hostSOA_Pos[0][i] = g_hostAOS_PosMass[4*i+0]; g_hostSOA_Pos[1][i] = g_hostAOS_PosMass[4*i+1]; g_hostSOA_Pos[2][i] = g_hostAOS_PosMass[4*i+2]; g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3]; g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i]; } if ( bCrossCheck ) { #ifdef HAVE_SIMD_THREADED if ( g_bUseSIMDForCrossCheck ) { ComputeGravitation_SIMD_threaded( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); for ( size_t i = 0; i < g_N; i++ ) { g_hostAOS_Force_Golden[3*i+0] = g_hostSOA_Force[0][i]; g_hostAOS_Force_Golden[3*i+1] = g_hostSOA_Force[1][i]; g_hostAOS_Force_Golden[3*i+2] = g_hostSOA_Force[2][i]; } } else { #endif ComputeGravitation_AOS( g_hostAOS_Force_Golden, g_hostAOS_PosMass, g_softening*g_softening, g_N ); #ifdef HAVE_SIMD_THREADED } #endif } // CPU->GPU copies in case we are measuring GPU performance if ( g_bCUDAPresent ) { cuda(MemcpyAsync( g_dptrAOS_PosMass, g_hostAOS_PosMass, 4*g_N*sizeof(float), cudaMemcpyHostToDevice ) ); } switch ( algorithm ) { case CPU_AOS: *ms = ComputeGravitation_AOS( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case CPU_AOS_tiled: *ms = ComputeGravitation_AOS_tiled( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case CPU_SOA: *ms = ComputeGravitation_SOA( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #ifdef HAVE_SIMD case CPU_SIMD: *ms = ComputeGravitation_SIMD( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifdef HAVE_SIMD_THREADED case CPU_SIMD_threaded: *ms = ComputeGravitation_SIMD_threaded( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifdef HAVE_SIMD_OPENMP case CPU_SIMD_openmp: *ms = ComputeGravitation_SIMD_openmp( g_hostSOA_Force, g_hostSOA_Pos, g_hostSOA_Mass, g_softening*g_softening, g_N ); bSOA = true; break; #endif #ifndef NO_CUDA case GPU_AOS: *ms = ComputeGravitation_GPU_AOS( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; case GPU_AOS_tiled: *ms = ComputeGravitation_GPU_AOS_tiled( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; case GPU_AOS_tiled_const: *ms = ComputeGravitation_GPU_AOS_tiled_const( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; #if 0 // commented out - too slow even on SM 3.0 case GPU_Atomic: cuda(Memset( g_dptrAOS_Force, 0, 3*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Atomic( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; #endif case GPU_Shared: cuda(Memset( 
g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Shared( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; case GPU_Const: cuda(Memset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) ); *ms = ComputeNBodyGravitation_GPU_AOS_const( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; case GPU_Shuffle: cuda(Memset( g_dptrAOS_Force, 0, 3*g_N*sizeof(float) ) ); *ms = ComputeGravitation_GPU_Shuffle( g_dptrAOS_Force, g_dptrAOS_PosMass, g_softening*g_softening, g_N ); cuda(Memcpy( g_hostAOS_Force, g_dptrAOS_Force, 3*g_N*sizeof(float), cudaMemcpyDeviceToHost ) ); break; case multiGPU_SingleCPUThread: memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) ); *ms = ComputeGravitation_multiGPU_singlethread( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; case multiGPU_MultiCPUThread: memset( g_hostAOS_Force, 0, 3*g_N*sizeof(float) ); *ms = ComputeGravitation_multiGPU_threaded( g_hostAOS_Force, g_hostAOS_PosMass, g_softening*g_softening, g_N ); break; #endif default: fprintf(stderr, "Unrecognized algorithm index: %d\n", algorithm); abort(); break; } if ( g_bGPUCrossCheck ) { int cDisagreements = 0; for ( int i = 0; i < g_numGPUs; i++ ) { for ( int j = 1; j < g_numGPUs; j++ ) { if ( memcmp( g_hostAOS_gpuCrossCheckForce[i], g_hostAOS_gpuCrossCheckForce[j], 3*g_N*sizeof(float) ) ) { fprintf( stderr, "GPU %d and GPU %d disagreed\n", i, j ); cDisagreements += 1; } } } if ( cDisagreements ) { goto Error; } } // SOA -> AOS if ( bSOA ) { for ( size_t i = 0; i < g_N; i++ ) { g_hostAOS_Force[3*i+0] = g_hostSOA_Force[0][i]; g_hostAOS_Force[3*i+1] = g_hostSOA_Force[1][i]; g_hostAOS_Force[3*i+2] = g_hostSOA_Force[2][i]; } } integrateGravitation_AOS( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_hostAOS_Force, g_dt, g_damping, g_N ); if ( g_bGPUCrossCheck && g_fGPUCrosscheckInput ) { if ( memcmp( g_hostAOS_Force, g_hostAOS_Force_Golden, 3*g_N*sizeof(float) ) ) { printf( "GPU CROSSCHECK FAILURE: Disagreement with golden values\n" ); goto Error; } } *maxRelError = 0.0f; if ( bCrossCheck ) { float max = 0.0f; for ( size_t i = 0; i < 3*g_N; i++ ) { float err = relError( g_hostAOS_Force[i], g_hostAOS_Force_Golden[i] ); if ( err > max ) { max = err; } } *maxRelError = max; } else { KahanAdder sumX; KahanAdder sumY; KahanAdder sumZ; for ( size_t i = 0; i < g_N; i++ ) { sumX += g_hostAOS_Force[i*3+0]; sumY += g_hostAOS_Force[i*3+1]; sumZ += g_hostAOS_Force[i*3+2]; } *maxRelError = std::max( fabs(sumX), std::max(fabs(sumY), fabs(sumZ)) ); if ( g_ZeroThreshold != 0.0 && fabs( *maxRelError ) > g_ZeroThreshold ) { printf( "Maximum sum of forces > threshold (%E > %E)\n", *maxRelError, g_ZeroThreshold ); goto Error; } } return true; Error: return false; } workerThread *g_CPUThreadPool; int g_numCPUCores; workerThread *g_GPUThreadPool; int g_numGPUs; struct gpuInit_struct { int iGPU; cudaError_t status; }; void initializeGPU( void *_p ) { cudaError_t status; gpuInit_struct *p = (gpuInit_struct *) _p; cuda(SetDevice( p->iGPU ) ); cuda(SetDeviceFlags( cudaDeviceMapHost ) ); cuda(Free(0) ); Error: p->status = status; } int main( int argc, char *argv[] ) { cudaError_t status; // kiloparticles int kParticles = 4, kMaxIterations = 0; if ( 1 == argc ) { printf( "Usage: nbody --numbodies <N> [--nocpu] [--nocrosscheck] [--iterations <N>]\n" ); printf( " --numbodies is 
multiplied by 1024 (default is 4)\n" ); printf( " By default, the app checks results against a CPU implementation; \n" ); printf( " disable this behavior with --nocrosscheck.\n" ); printf( " The CPU implementation may be disabled with --nocpu.\n" ); printf( " --nocpu implies --nocrosscheck.\n\n" ); printf( " --nosimd uses serial CPU implementation instead of SIMD.\n" ); printf( " --iterations specifies a fixed number of iterations to execute\n"); return 1; } // for reproducible results for a given N srand(7); { g_numCPUCores = processorCount(); g_CPUThreadPool = new workerThread[g_numCPUCores]; for ( size_t i = 0; i < g_numCPUCores; i++ ) { if ( ! g_CPUThreadPool[i].initialize( ) ) { fprintf( stderr, "Error initializing thread pool\n" ); return 1; } } } status = cudaGetDeviceCount( &g_numGPUs ); g_bCUDAPresent = (cudaSuccess == status) && (g_numGPUs > 0); if ( g_bCUDAPresent ) { cudaDeviceProp prop; cuda(GetDeviceProperties( &prop, 0 ) ); g_bSM30Present = prop.major >= 3; } g_bNoCPU = chCommandLineGetBool( "nocpu", argc, argv ); if ( g_bNoCPU && ! g_bCUDAPresent ) { printf( "--nocpu specified, but no CUDA present...exiting\n" ); exit(1); } g_bCrossCheck = ! chCommandLineGetBool( "nocrosscheck", argc, argv ); if ( g_bNoCPU ) { g_bCrossCheck = false; } if ( g_bCrossCheck && chCommandLineGetBool( "nosse", argc, argv ) ) { g_bUseSIMDForCrossCheck = false; } chCommandLineGet( &kParticles, "numbodies", argc, argv ); g_N = kParticles*1024; chCommandLineGet( &kMaxIterations, "iterations", argc, argv); // Round down to the nearest multiple of the CPU count (e.g. if we have // a system with a CPU count that isn't a power of two, we need to round) g_N -= g_N % g_numCPUCores; if ( chCommandLineGetBool( "gpu-crosscheck", argc, argv ) ) { g_bGPUCrossCheck = true; } g_bGPUCrossCheck = chCommandLineGetBool( "gpu-crosscheck", argc, argv ); { char *szFilename; if ( chCommandLineGet( &szFilename, "gpu-crosscheck-input-file", argc, argv ) ) { if ( ! g_bGPUCrossCheck ) { fprintf( stderr, "GPU crosscheck input file requires --gpu-crosscheck\n" ); goto Error; } g_fGPUCrosscheckInput = fopen( szFilename, "rb" ); if ( ! g_fGPUCrosscheckInput ) { fprintf( stderr, "Could not open %s for input\n", szFilename ); goto Error; } { int version; if ( 1 != fread( &version, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of version failed\n" ); goto Error; } if ( version != NBODY_GOLDENFILE_VERSION ) { fprintf( stderr, "File version mismatch - generate new golden files!\n" ); goto Error; } } if ( 1 != fread( &g_N, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of particle count failed\n" ); goto Error; } if ( 1 != fread( &kMaxIterations, sizeof(int), 1, g_fGPUCrosscheckInput ) ) { fprintf( stderr, "Read of iteration count failed\n" ); goto Error; } printf( "%d iterations specified in input file\n", kMaxIterations ); } if ( chCommandLineGet( &szFilename, "gpu-crosscheck-output-file", argc, argv ) ) { if ( g_fGPUCrosscheckInput ) { fprintf( stderr, "Crosscheck input and output files are mutually exclusive. Please specify only one.\n" ); goto Error; } if ( ! g_bGPUCrossCheck ) { fprintf( stderr, "GPU crosscheck output file requires --gpu-crosscheck\n" ); goto Error; } g_fGPUCrosscheckOutput = fopen( szFilename, "wb" ); if ( ! g_fGPUCrosscheckOutput ) { fprintf( stderr, "Could not open %s for output\n", szFilename ); goto Error; } if ( ! 
kMaxIterations ) { fprintf( stderr, "Must specify --iterations when generating output file for GPU cross check.\n" ); goto Error; } { int version = NBODY_GOLDENFILE_VERSION; if ( 1 != fwrite( &version, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of version failed\n" ); goto Error; } } if ( 1 != fwrite( &g_N, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of particle count failed\n" ); goto Error; } if ( 1 != fwrite( &kMaxIterations, sizeof(int), 1, g_fGPUCrosscheckOutput ) ) { fprintf( stderr, "Write of iteration count failed\n" ); goto Error; } } } chCommandLineGet( &g_ZeroThreshold, "zero", argc, argv ); if ( g_numGPUs ) { // optionally override GPU count from command line chCommandLineGet( &g_numGPUs, "numgpus", argc, argv ); g_GPUThreadPool = new workerThread[g_numGPUs]; for ( size_t i = 0; i < g_numGPUs; i++ ) { if ( ! g_GPUThreadPool[i].initialize( ) ) { fprintf( stderr, "Error initializing thread pool\n" ); return 1; } } for ( int i = 0; i < g_numGPUs; i++ ) { gpuInit_struct initGPU = {i}; g_GPUThreadPool[i].delegateSynchronous( initializeGPU, &initGPU ); if ( cudaSuccess != initGPU.status ) { fprintf( stderr, "Initializing GPU %d failed " " with %d (%s)\n", i, initGPU.status, cudaGetErrorString( initGPU.status ) ); return 1; } } } printf( "Running simulation with %d particles, crosscheck %s, CPU %s\n", (int) g_N, g_bCrossCheck ? "enabled" : "disabled", g_bNoCPU ? "disabled" : "enabled" ); #if defined(HAVE_SIMD_OPENMP) g_maxAlgorithm = CPU_SIMD_openmp; #elif defined(HAVE_SIMD_THREADED) g_maxAlgorithm = CPU_SIMD_threaded; #elif defined(HAVE_SIMD) g_maxAlgorithm = CPU_SIMD; #else g_maxAlgorithm = CPU_SOA; #endif g_Algorithm = g_bCUDAPresent ? GPU_AOS : g_maxAlgorithm; g_Algorithm = multiGPU_SingleCPUThread; if ( g_bCUDAPresent || g_bNoCPU ) { // max algorithm is different depending on whether SM 3.0 is present g_maxAlgorithm = g_bSM30Present ? GPU_AOS_tiled_const : multiGPU_MultiCPUThread; } if ( g_bCUDAPresent ) { cudaDeviceProp propForVersion; cuda(SetDeviceFlags( cudaDeviceMapHost ) ); cuda(GetDeviceProperties( &propForVersion, 0 ) ); if ( propForVersion.major < 3 ) { // Only SM 3.x supports shuffle and fast atomics, so we cannot run // some algorithms on this board. 
g_maxAlgorithm = multiGPU_MultiCPUThread; } cuda(HostAlloc( (void **) &g_hostAOS_PosMass, 4*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); for ( int i = 0; i < 3; i++ ) { cuda(HostAlloc( (void **) &g_hostSOA_Pos[i], g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_Force[i], g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); } cuda(HostAlloc( (void **) &g_hostAOS_Force, 3*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(HostAlloc( (void **) &g_hostAOS_Force_Golden, 3*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(HostAlloc( (void **) &g_hostAOS_VelInvMass, 4*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_Mass, g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(HostAlloc( (void **) &g_hostSOA_InvMass, g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); cuda(Malloc( &g_dptrAOS_PosMass, 4*g_N*sizeof(float) ) ); cuda(Malloc( (void **) &g_dptrAOS_Force, 3*g_N*sizeof(float) ) ); if ( g_bGPUCrossCheck ) { printf( "GPU cross check enabled (%d GPUs), disabling CPU\n", g_numGPUs ); g_bNoCPU = true; g_bCrossCheck = false; if ( g_numGPUs < 2 ) { fprintf( stderr, "GPU cross check enabled, but <2 GPUs available\n" ); goto Error; } for ( int i = 0; i < g_numGPUs; i++ ) { cuda(HostAlloc( (void **) (&g_hostAOS_gpuCrossCheckForce[i]), 3*g_N*sizeof(float), cudaHostAllocPortable|cudaHostAllocMapped ) ); } } } else { g_hostAOS_PosMass = new float[4*g_N]; for ( int i = 0; i < 3; i++ ) { g_hostSOA_Pos[i] = new float[g_N]; g_hostSOA_Force[i] = new float[g_N]; } g_hostSOA_Mass = new float[g_N]; g_hostAOS_Force = new float[3*g_N]; g_hostAOS_Force_Golden = new float[3*g_N]; g_hostAOS_VelInvMass = new float[4*g_N]; g_hostSOA_Mass = new float[g_N]; g_hostSOA_InvMass = new float[g_N]; } randomUnitBodies( g_hostAOS_PosMass, g_hostAOS_VelInvMass, g_N ); for ( size_t i = 0; i < g_N; i++ ) { g_hostSOA_Mass[i] = g_hostAOS_PosMass[4*i+3]; g_hostSOA_InvMass[i] = 1.0f / g_hostSOA_Mass[i]; } #if 0 // gather performance data over GPU implementations // for different problem sizes. printf( "kBodies\t" ); for ( int algorithm = GPU_AOS; algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]); algorithm++ ) { printf( "%s\t", rgszAlgorithmNames[algorithm] ); } printf( "\n" ); for ( int kBodies = 3; kBodies <= 96; kBodies += 3 ) { g_N = 1024*kBodies; printf( "%d\t", kBodies ); for ( int algorithm = GPU_AOS; algorithm < sizeof(rgszAlgorithmNames)/sizeof(rgszAlgorithmNames[0]); algorithm++ ) { float sum = 0.0f; const int numIterations = 10; for ( int i = 0; i < numIterations; i++ ) { float ms, err; if ( ! ComputeGravitation( &ms, &err, (nbodyAlgorithm_enum) algorithm, g_bCrossCheck ) ) { fprintf( stderr, "Error computing timestep\n" ); exit(1); } sum += ms; } sum /= (float) numIterations; double interactionsPerSecond = (double) g_N*g_N*1000.0f / sum; if ( interactionsPerSecond > 1e9 ) { printf ( "%.2f\t", interactionsPerSecond/1e9 ); } else { printf ( "%.3f\t", interactionsPerSecond/1e9 ); } } printf( "\n" ); } return 0; #endif { int kIterations = 0; bool bStop = false; while ( ! bStop ) { float ms, err; if ( ! ComputeGravitation( &ms, &err, g_Algorithm, g_bCrossCheck ) ) { fprintf( stderr, "Error computing timestep\n" ); exit(1); } double interactionsPerSecond = (double) g_N*g_N*1000.0f / ms; if ( interactionsPerSecond > 1e9 ) { printf ( "\r%s: %8.2f ms = %8.3fx10^9 interactions/s (Rel. 
error: %E)\n", rgszAlgorithmNames[g_Algorithm], ms, interactionsPerSecond/1e9, err ); } else { printf ( "\r%s: %8.2f ms = %8.3fx10^6 interactions/s (Rel. error: %E)\n", rgszAlgorithmNames[g_Algorithm], ms, interactionsPerSecond/1e6, err ); } if (kMaxIterations) { kIterations++; if (kIterations >= kMaxIterations) { bStop = true; } } if ( kbhit() ) { char c = getch(); switch ( c ) { case ' ': if ( g_Algorithm == g_maxAlgorithm ) { g_Algorithm = g_bNoCPU ? GPU_AOS : CPU_AOS; // Skip slow CPU implementations if we are using SIMD for cross-check if ( g_bUseSIMDForCrossCheck ) { #if defined(HAVE_SIMD_THREADED) g_Algorithm = CPU_SIMD_threaded; #elif defined(HAVE_SIMD_OPENMP) g_Algorithm = CPU_SIMD_openmp; #endif } } else { g_Algorithm = (enum nbodyAlgorithm_enum) (g_Algorithm+1); } break; case 'q': case 'Q': bStop = true; break; } } } } if ( g_fGPUCrosscheckInput ) fclose( g_fGPUCrosscheckInput ); if ( g_fGPUCrosscheckOutput ) fclose( g_fGPUCrosscheckOutput ); return 0; Error: if ( g_fGPUCrosscheckInput ) fclose( g_fGPUCrosscheckInput ); if ( g_fGPUCrosscheckOutput ) fclose( g_fGPUCrosscheckOutput ); if ( cudaSuccess != status ) { printf( "CUDA Error: %s\n", cudaGetErrorString( status ) ); } return 1; }
5590eeeec316c4a1303399656007ec67f611dba5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Template project which demonstrates the basics on how to setup a project * example application. * Device code. 
*/ #ifndef _FIXERRORSVOTING_KERNEL_H_ #define _FIXERRORSVOTING_KERNEL_H_ #include <stdio.h> #include <string.h> #include <ctype.h> #include "FixErrorsVoting.h" texture<unsigned char,1, hipReadModeElementType> tex; __constant__ unsigned int _char_size_ = 0x08; // 8 bits in 1 char(unsigned) __constant__ unsigned char _bit_mask_[8] = { 0x01, //00000001 0x02, //00000010 0x04, //00000100 0x08, //00001000 0x10, //00010000 0x20, //00100000 0x40, //01000000 0x80 //10000000 }; __device__ char nextNuc[256]; __constant__ char unmasked_nuc[256] = {0, 1, 2, 3, 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0', 'A','\0', 'C','\0','\0', // 69 '\0', 'G', 'H','\0','\0', 'K','\0', 'M', 'N','\0', // 79 '\0','\0', 'R', 'S', 'T','\0','\0', 'W', 'X', 'Y', // 89 '\0','\0','\0','\0','\0','\0','\0', 'A','\0', 'C', // 99 '\0','\0','\0', 'G', 'H','\0','\0', 'K','\0', 'M', // 109 'N', '\0','\0','\0', 'R', 'S', 'T','\0','\0','\0', // 119 'X', '\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 0, 1, 2, 3, 0, 1, // 249 2, 3, 0, 1, 2, 3}; // 255 __constant__ char nuc_char[256] = {'G', 'A', 'C', 'T', 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0', 'A','\0', 'C','\0','\0', // 69 '\0', 'G', 'H','\0','\0', 'K','\0', 'M', 'N','\0', // 79 '\0','\0', 'R', 'S', 'T','\0','\0', 'W','\0', 'Y', // 89 '\0','\0','\0','\0','\0','\0','\0', 'a','\0', 'c', // 99 '\0','\0','\0', 'g', 'h','\0','\0', 'k','\0', 'm', // 109 'n', '\0','\0','\0', 'r', 's', 't','\0','\0','\0', // 119 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 'g', 'a', 'c', 't', 'g', 'a', // 249 'c', 't','g','a','c','t'}; // 255 __constant__ char unmasked_nuc_index[256] = { 
0,1,2,3,4,4,4,4,4,4, // 0 9 4,4,4,4,4,4,4,4,4,4, // 10 19 4,4,4,4,4,4,4,4,4,4, // 20 29 4,4,4,4,4,4,4,4,4,4, // 30 39 4,4,4,4,4,4,4,4,4,4, // 40 49 4,4,4,4,4,4,4,4,4,4, // 50 59 4,4,4,4,4,1,4,2,4,4, // 60 69 4,0,4,4,4,4,4,4,4,4, // 70 79 4,4,4,4,3,4,4,4,4,4, // 80 89 4,4,4,4,4,4,4,1,4,2, // 90 99 4,4,4,0,4,4,4,4,4,4, // 100 109 4,4,4,4,4,4,3,4,4,4, // 110 119 4,4,4,4,4,4,4,4,4,4, // 120 129 4,4,4,4,4,4,4,4,4,4, // 130 139 4,4,4,4,4,4,4,4,4,4, // 140 149 4,4,4,4,4,4,4,4,4,4, // 150 159 4,4,4,4,4,4,4,4,4,4, // 160 169 4,4,4,4,4,4,4,4,4,4, // 170 179 4,4,4,4,4,4,4,4,4,4, // 180 189 4,4,4,4,4,4,4,4,4,4, // 190 199 4,4,4,4,4,4,4,4,4,4, // 200 209 4,4,4,4,4,4,4,4,4,4, // 210 219 4,4,4,4,4,4,4,4,4,4, // 220 229 4,4,4,4,4,4,4,4,4,4, // 230 239 4,4,4,4,0,1,2,3,0,1, // 240 249 2,3,0,1,2,3 }; // 250 255 __constant__ char numeric_nuc_index[256] = { 0,1,2,3,4,4,4,4,4,4, // 0 9 4,4,4,4,4,4,4,4,4,4, // 10 19 4,4,4,4,4,4,4,4,4,4, // 20 29 4,4,4,4,4,4,4,4,4,4, // 30 39 4,4,4,4,4,4,4,4,4,4, // 40 49 4,4,4,4,4,4,4,4,4,4, // 50 59 4,4,4,4,4,1,4,2,4,4, // 60 69 4,0,4,4,4,4,4,4,4,4, // 70 79 4,4,4,4,3,4,4,4,4,4, // 80 89 4,4,4,4,4,4,4,-3,4,-2, // 90 99 4,4,4,-4,4,4,4,4,4,4, // 100 109 4,4,4,4,4,4,-1,4,4,4, // 110 119 4,4,4,4,4,4,4,4,4,4, // 120 129 4,4,4,4,4,4,4,4,4,4, // 130 139 4,4,4,4,4,4,4,4,4,4, // 140 149 4,4,4,4,4,4,4,4,4,4, // 150 159 4,4,4,4,4,4,4,4,4,4, // 160 169 4,4,4,4,4,4,4,4,4,4, // 170 179 4,4,4,4,4,4,4,4,4,4, // 180 189 4,4,4,4,4,4,4,4,4,4, // 190 199 4,4,4,4,4,4,4,4,4,4, // 200 209 4,4,4,4,4,4,4,4,4,4, // 210 219 4,4,4,4,4,4,4,4,4,4, // 220 229 4,4,4,4,4,4,4,4,4,4, // 230 239 4,4,4,4,-12,-11,-10,-9,-8,-7, // 240 249 -6,-5,-4,-3,-2,-1 }; // 250 255 __constant__ unsigned char nucToIndex[256] = {16,16,16,16,16,16,16,16,16,16, // 0 16,16,16,16,16,16,16,16,16,16, // 10 16,16,16,16,16,16,16,16,16,16, // 20 16,16,16,16,16,16,16,16,16,16, // 30 16,16,16,16,16,16,16,16,16,16, // 40 16,16,16,16,16,16,16,16,16,16, // 50 16,16,16,16,16,1,12,2,14,16, // 60 16,0,11,16,16,116,16,9,4,16, // 70 16,16,5,8,3,16,13,7,15,6, // 80 16,16,16,16,16,16,16,253,12,254, // 90 14,16,16,252,11,8,16,116,16,9, // 100 4,16,16,16,5,16,255,16,13,7, // 110 15,6,16,16,16,16,16,16,16,16, // 120 16,16,16,16,16,16,16,16,16,16, // 130 16,16,16,16,16,16,16,16,16,16, // 140 16,16,16,16,16,16,16,16,16,16, // 150 16,16,16,16,16,16,16,16,16,16, // 160 16,16,16,16,16,16,16,16,16,16, // 170 16,16,16,16,16,16,16,16,16,16, // 180 16,16,16,16,16,16,16,16,16,16, // 190 16,16,16,16,16,16,16,16,16,16, // 200 16,16,16,16,16,16,16,16,16,16, // 210 16,16,16,16,16,16,16,16,16,16, // 220 16,16,16,16,16,16,16,16,16,16, // 230 16,16,16,16,16,16,16,16,16,16, // 240 16,16,16,16,16,16}; // 250 __constant__ char indexToNuc[256] = {'G', 'A', 'C', 'T', 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 69 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 79 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 89 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 99 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 109 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 119 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 
'\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 'g', 'a', 'c', 't', 'g', 'a', // 249 'c', 't','g','a','c','t'}; // 255 __device__ int _toupper_(int ch) { if ((unsigned int)(ch - 'a') < 26u ) ch += 'A' - 'a'; return ch; } __device__ char * _strcpy_(char *s1, char *s2) { char *os1; os1 = s1; while (*s1++ = *s2++) ; return(os1); } __device__ char * _strncpy_(char *dst, const char *src,register size_t n) { if (n != 0) { register char *d = dst; register const char *s = src; do { if ((*d++ = *s++) == 0) { /* NUL pad the remaining n-1 bytes */ while (--n != 0) *d++ = 0; break; } } while (--n != 0); } return (dst); } //Check each char inside this read, only "A/C/T/G" allowed in the fasta file __device__ int PrepareSequence(char *read) { int p; int return_value = 1; for (p = 0; p < READ_LENGTH; p++ ) { read[p] = _toupper_(read[p]); if (!(read[p] == 'A' || read[p] == 'C' || read[p] == 'T' || read[p] == 'G')) { return_value = 0; break; } } return return_value; } //Check whether bloom filter contains "string key" __device__ bool contains(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; unsigned int b = 378551; unsigned int a = 63689; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_RSHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = hash * a + (str[i]); a = a * b; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_JSHash_ hash = 1315423911; i=0; for(i = 0; i < len; i++) { hash ^= ((hash << 5) + (str[i]) + (hash >> 2)); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_PJWHash_ unsigned int ThreeQuarters = (unsigned int)(((unsigned int)(sizeof(unsigned int) * 8) * 3) / 4); unsigned int HighBits = (unsigned int)(0xFFFFFFFF) << (sizeof(unsigned int) * 7); hash= 0; a= 0; i= 0; for(i = 0; i < len; i++) { hash = (hash << sizeof(unsigned int)) + (str[i]); if((a = hash & HighBits) != 0) { hash = (( hash ^ (a >> ThreeQuarters)) & (~HighBits)); } } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_ELFHash_ hash=0;i=0;a=0; for(i = 0; i < len; i++) { hash = (hash << 4) + (str[i]); if((a = hash & 0xF0000000L) != 0) { hash ^= (a >> 24); } hash &= ~a; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); ; if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_BKDRHash_ hash=0;i=0;a=131; for(i = 0; i < len; i++) { hash = (hash * a) + (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / 
_char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_SDBMHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = (str[i]) + (hash << 6) + (hash << 16) - hash; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_DJBHash_ hash = 5381;i=0; for(i = 0; i < len; i++) { hash = ((hash << 5) + hash) + (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } return true; } __device__ bool contains2(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_DEKHash_ hash = len;i=0; for(i = 0; i < len; i++) { hash = ((hash << 5) ^ (hash >> 27)) ^ (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } /* //_BPHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = hash << 7 ^ (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_FNVHash_ a = 0x811C9DC5; hash= 0; i= 0; for(i = 0; i < len; i++) { hash *= a; hash ^= (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_APHash_ hash = 0xAAAAAAAA;i=0; for(i = 0; i < len; i++) { hash ^= ((i & 1) == 0) ? 
( (hash << 7) ^ (str[i]) * (hash >> 3)) : (~((hash << 11) + (str[i]) ^ (hash >> 5))); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } */ return true; } __device__ bool contains3(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_krHash_ hash = 0; for(i = 0; i < len; i++) { hash += str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_ocaml_hash_ hash=0;i=0; for (i=0; i<len; i++) { hash = hash*19 + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_sml_hash_ hash=0;i=0; for (i=0; i<len; i++) { hash = 33*hash + 720 + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_stl_hash_ _strncpy_(str, key,TUPLE_SIZE); hash=0;i=0; for (i=0; i<len; i++) { hash = 5*hash + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } return true; } //search tuple from bloon filter __device__ int lstspct_FindTuple(char *tuple, int numTuples) { //check whether in the bloom filter //if(contains(tuple,numTuples * 4)&&contains2(tuple,numTuples * 4)&&contains3(tuple,numTuples * 4)) if(contains(tuple,numTuples * BLOOM_SIZE)&&contains2(tuple,numTuples * BLOOM_SIZE)) return 1; else return -1; } __device__ int d_strTpl_Valid(char *st) { int i; int return_value = 1; if (st == NULL) return_value = 0; else { for (i = 0; i < TUPLE_SIZE; i++) { if (numeric_nuc_index[st[i]] >= 4) { return_value = 0; break; } } } return return_value; } //check whether the read is solid or not, by examine each tuple in this read, whether can be found or not in //all the string tuple list __device__ int CheckSolid(char *seq, int tupleSize, int numTuples){ int p; char tuple[TUPLE_SIZE+1]; int return_value = 1; for (p = 0; p < READ_LENGTH - tupleSize +1; p++ ){ _strncpy_(tuple, (char*) &seq[p],tupleSize); tuple[tupleSize] = 0; if (lstspct_FindTuple(tuple,numTuples) == -1) { return_value = 0; break; } } return return_value; } __device__ int SolidSubsequence(char *seq, int tupleSize, int &seqStart, int &seqEnd, int numTuples) { int i; int solidSubsequence = 1; //char tempTuple[TUPLE_SIZE+1]; char *tempTuple; for (i = seqStart; i < seqEnd - tupleSize + 1; i++) { //_strncpy_(tempTuple , (char*) &seq[i],tupleSize); //tempTuple[tupleSize] = 0; tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple,numTuples) == -1) { solidSubsequence = 0; break; } } return solidSubsequence; } __device__ int TrimSequence(char *seq, int tupleSize, int &seqStart, int &seqEnd, int numTuples,int maxTrim) { int i; seqStart = 0; int flag = 1; //char tempTuple[TUPLE_SIZE+1]; char *tempTuple; //get length of this read int len = seq[READ_LENGTH + 1]; for (i = 0; i < len - tupleSize + 1; i++ ) { //_strncpy_(tempTuple , &seq[i],tupleSize); //tempTuple[tupleSize] = 0; 
tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple,numTuples) != -1) { break; } // Not solid yet, advance seqStart++; } seqEnd = len; for (i = seqStart + 1; i < len - tupleSize + 1; i++ ) { //_strncpy_(tempTuple , &seq[i],tupleSize); //tempTuple[tupleSize] = 0; tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple, numTuples) == -1) { break; } } if (i == len - tupleSize) // The sequence is not trimmed. seqEnd = len - 1; else // The sequence is trimmed. Trim end is the index of the first // 'bad' nucleotide. Since seqStart is the index of the first // 'good' nucleotide, seqEnd - seqStart is the length of the // untrimmed seq. In other words, it's half open 0 based // indexing. seqEnd = i + tupleSize-1; if (seqStart > maxTrim) // return 0; flag = 0; else if (len - seqEnd > maxTrim) //return 0; flag = 0; else if(SolidSubsequence(seq, tupleSize, seqStart, seqEnd,numTuples) == 0) // return 0; flag = 0; else { int newLength = seqEnd - seqStart + 1; for (int s = 0; s < newLength; s++ ) { seq[s] = seq[s + seqStart]; } //seq.length = newLength -1; len = newLength -1; } //save the new length //_strncpy_(&seq[READ_LENGTH + 1], itoa1(len),4); //itoa1(len,&seq[READ_LENGTH + 1]); seq[READ_LENGTH + 1] = len; return flag; } //////////////////////////////////////////////////////////////////////////////// //! Fix two errors, step 1 kernel function //! @param d_reads_arr input data in global memory //! @param d_param input data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void fix_errors1(char *d_reads_arr,Param *d_param) { short numSearch=1; nextNuc['G'] = 'A'; nextNuc['A'] = 'C'; nextNuc['C'] = 'T'; nextNuc['T'] = 'G'; int c_tid = blockIdx.x * blockDim.x + threadIdx.x; int round = 0; int total_thread = BLOCK * THREAD; int discardSeq=0; int trimStart=0, trimEnd=0; int chunk_bound = (total_thread < MAX_READS_BOUND ? total_thread:MAX_READS_BOUND); round = d_param->NUM_OF_READS/chunk_bound + (d_param->NUM_OF_READS%chunk_bound == 0 ? 0:1); int maxPos[READ_LENGTH * 4],maxMod[READ_LENGTH * 4]; unsigned char votes[READ_LENGTH][4],mutNuc, mutNuc2, prev, cur; int solid[READ_LENGTH]; int s,i,j,m,n,startPos, fixPos=-1,numFixed = 0,numChanges=0; short return_value = 0,flag = 0,flag1=1; // Cast votes for mutations int p,vp,mut; short numAboveThreshold = 0,newLength,len; short maxVotes = 0,allGood = 1; int numTies = -1,pindex = 0, mod, pos,current_read_idx; char *tempTuple, *read; for(i=0;i<round;i++) { flag = 0;flag1=1;numFixed = 0; numChanges=0; return_value = 0;discardSeq = 0; current_read_idx = c_tid + chunk_bound * i; //check if run out of reads current_read_idx = (current_read_idx > d_param->NUM_OF_READS ? 
0:current_read_idx); //take 1 read per thread read = &d_reads_arr[current_read_idx*(READ_LENGTH + 2)]; //get length of this read len = read[READ_LENGTH + 1]; if (!PrepareSequence(read)) { discardSeq = 1; } else { numFixed = 0; fixPos = -1; do{ if(flag) break; else{ if (fixPos > 0) startPos = fixPos; else startPos = 0; for (m = 0; m < READ_LENGTH; m++) { for (int n = 0; n < 4; n++) //votes[threadIdx.x][m][n] = 0; votes[m][n] = 0; } for(m=0;m<READ_LENGTH;m++) solid[m] = 0; for (p = startPos; p < len - d_param->tupleSize + 1; p++ ){ tempTuple = &read[p]; if (d_strTpl_Valid(tempTuple)){ if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) solid[p] = 1; else{ for (vp = 0; vp < d_param->tupleSize; vp++){ mutNuc = nextNuc[read[p + vp]]; read[p + vp] = mutNuc; for (mut = 0; mut < 3; mut++ ){ tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) votes[vp + p][unmasked_nuc_index[mutNuc]]++; mutNuc = nextNuc[mutNuc]; read[p + vp] = mutNuc; } } } } } ////////////////vote completed////////////////////// ++numFixed; //////////////////////fix sequence based on voting in previous step////////////// fixPos = 0;numAboveThreshold = 0;maxVotes = 0;allGood = 1; for (p = 0; p < len - d_param->tupleSize + 1; p++ ) { if (solid[p] == 0) { allGood = 0;break; } } if (allGood) // no need to fix this sequence return_value = 1; else { for (p = 0; p < len; p++){ for (m = 0; m < 4; m++){ if (votes[p][m] > d_param->minVotes) numAboveThreshold++; if (votes[p][m] >= maxVotes) maxVotes = votes[p][m]; } } pindex = 0;numTies = -1; // Make sure there aren't multiple possible fixes for (p = 0; p < len; p++){ for (m = 0; m < 4; m++){ if (votes[p][m] == maxVotes){ numTies++; maxPos[pindex] = p; maxMod[pindex] = m; pindex++; } } } if (numAboveThreshold > 0 ){ if (numTies < numSearch || (pindex > 1 && maxPos[0] != maxPos[1])){ // Found at least one change to the sequence for (s = 0; s < numSearch && s < pindex; s++) { mod = maxMod[s]; pos = maxPos[s]; fixPos = pos; if (mod < 4){ prev = read[pos]; cur = nuc_char[mod]; read[pos] = cur; } } if( CheckSolid(read,d_param->tupleSize,d_param->numTuples)) return_value = 1; else{ //reset return_value = 0; //read[pos] = prev; } } else { return_value = 0; } } else { return_value = 0; } } //check fix sequence return if( return_value) { flag = 1; numChanges = numFixed; break; } } } while (fixPos > 0); /////////////////////////end of solidify//////// if (numChanges != 0){ if (numChanges > d_param->maxMods) discardSeq = 1; else discardSeq = 0; } else{ if( d_param->numSearch == 2){ //removed trim in fix error1 discardSeq = 1; } else { // Find the locations of the first solid positions. if (d_param->doTrim) { if(TrimSequence(read, d_param->tupleSize,trimStart, trimEnd, d_param->numTuples,d_param->maxTrim)){ // If there is space for one solid tuple (trimStart < trimEnd - ts+1) // and the subsequence between the trimmed ends is ok, print the // trimmed coordinates. discardSeq = 0; } else discardSeq = 1; } else discardSeq = 1; } } } if (discardSeq) { read[READ_LENGTH] = 'D'; //F fixed, D: not fixed, discard } else { read[READ_LENGTH] = 'F'; //F fixed, D: not fixed, discard } __syncthreads(); } } //////////////////////////////////////////////////////////////////////////////// //! Fix two errors step 2 kernel function //! @param d_reads_arr input data in global memory //! 
@param d_param input data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void fix_errors2(char *d_reads_arr,Param *d_param, int numReads) { short numSearch = 2; nextNuc['G'] = 'A'; nextNuc['A'] = 'C'; nextNuc['C'] = 'T'; nextNuc['T'] = 'G'; int c_tid = blockIdx.x * blockDim.x + threadIdx.x; int round = 0; int total_thread = BLOCK * THREAD; int discardSeq=0; int trimStart=0, trimEnd=0; int chunk_bound = (total_thread < MAX_READS_BOUND ? total_thread:MAX_READS_BOUND); round = numReads/chunk_bound + (numReads%chunk_bound == 0 ? 0:1); int maxPos[READ_LENGTH * 4]; int maxMod[READ_LENGTH * 4]; unsigned char votes[READ_LENGTH][4],mutNuc, mutNuc2, prev, cur; int solid[READ_LENGTH]; //__shared__ unsigned char solid[READ_LENGTH]; int s,i,j,m,n; int startPos, fixPos=-1; int numFixed = 0,numChanges=0; short return_value = 0,flag = 0,flag1=1; // Cast votes for mutations int p,vp,mut; short numAboveThreshold = 0; short maxVotes = 0,allGood = 1; int numTies = -1; int pindex = 0; int mod, pos; short newLength,len; int current_read_idx; char *tempTuple, *read; /* Since GPU cannot process all reads at the same time (limited block NO.), the reads are divided into several rounds to process. */ for(i=0;i<round;i++) { flag = 0; flag1=1;numFixed = 0;numChanges=0;return_value = 0; current_read_idx = c_tid + chunk_bound * i; //check if run out of reads current_read_idx = (current_read_idx > numReads ? 0:current_read_idx); //take 1 read per thread read = &d_reads_arr[current_read_idx*(READ_LENGTH + 2)]; //get length of this read len = read[READ_LENGTH + 1]; discardSeq = 0; if (!PrepareSequence(read)) discardSeq = 1; else { numFixed = 0; fixPos = -1; do { if(flag) break; else{ if (fixPos > 0) startPos = fixPos; else startPos = 0; for (m = 0; m < READ_LENGTH; m++) { for (int n = 0; n < 4; n++) votes[m][n] = 0; } for(m=0;m<READ_LENGTH;m++) solid[m] = 0; for (p = startPos; p < len - d_param->tupleSize + 1; p++ ) { tempTuple = &read[p]; if (d_strTpl_Valid(tempTuple)) { if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) solid[p] = 1; else{ for (vp = 0; vp < d_param->tupleSize-1; vp++) { mutNuc = nextNuc[read[p + vp]]; read[p + vp] = mutNuc; for (mut = 0; mut < 3; mut++ ) { tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) { votes[vp + p][unmasked_nuc_index[mutNuc]]++; } //delta = 2 for(m=vp+1;m<d_param->tupleSize;m++) { mutNuc2 = nextNuc[read[p + m]]; read[p + m] = mutNuc2; for(n=0;n<3;n++) { tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) { votes[vp + p][unmasked_nuc_index[mutNuc]]++;//history votes[m + p][unmasked_nuc_index[mutNuc2]]++; } mutNuc2 = nextNuc[mutNuc2]; read[p + m] = mutNuc2; } } mutNuc = nextNuc[mutNuc]; read[p + vp] = mutNuc; } } } } } ++numFixed; //fix sequence based on voting in previous step fixPos = 0;numAboveThreshold = 0;maxVotes = 0;allGood = 1; for (p = 0; p < len - d_param->tupleSize + 1; p++ ) { if (solid[p] == 0) { allGood = 0; break; } } if (allGood) // no need to fix this sequence return_value = 1; else{ for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] > d_param->minVotes) numAboveThreshold++; if (votes[p][m] >= maxVotes) maxVotes = votes[p][m]; } } pindex = 0;numTies = -1; for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] == maxVotes) { numTies++; maxPos[pindex] = p; maxMod[pindex] = m; pindex++; } } } //second votes[p][m] = 0; maxVotes = 0; for (p = 0; p < len ; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] >= 
maxVotes) maxVotes = votes[p][m]; } } for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] == maxVotes) { maxPos[pindex] = p; maxMod[pindex] = m; //pindex++; } } } __syncthreads(); if (numAboveThreshold > 0 ) { //if (numTies < numSearch || (pindex > 1 && maxPos[0] != maxPos[1])){ // Found at least one change to the sequence for (s = 0; s < 2; s++) { mod = maxMod[s]; pos = maxPos[s]; fixPos = pos; if (mod < 4){ prev = read[pos]; cur = nuc_char[mod]; read[pos] = nuc_char[mod]; } } return_value = CheckSolid(read,d_param->tupleSize,d_param->numTuples); //} //else { return_value = 0; //} } else { return_value = 0; } __syncthreads(); } //check fix sequence return if( return_value){ flag = 1; numChanges = numFixed; break; } }//if flag } while (fixPos > 0); /////////////////////////end of solidify//////// if (numChanges != 0) { if (numChanges > d_param->maxMods){ discardSeq = 1; //_strncpy_(read , original, READ_LENGTH + 2); } else { discardSeq = 0; } } else { // Find the locations of the first solid positions. if (d_param->doTrim){ if(TrimSequence(read, d_param->tupleSize,trimStart, trimEnd, d_param->numTuples,d_param->maxTrim)){ // If there is space for one solid tuple (trimStart < trimEnd - ts+1) // and the subsequence between the trimmed ends is ok, print the // trimmed coordinates. discardSeq = 0; } else discardSeq = 1; } else { discardSeq = 1; } } } if (discardSeq) { read[READ_LENGTH] = 'D'; //last char for indicator } else { read[READ_LENGTH] = 'F'; //F fixed, D: not fixed, discard } __syncthreads(); } } #endif // #ifndef _FIXERRORSVOTING_KERNEL_H_
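The `contains`, `contains2`, and `contains3` device functions above query a Bloom filter: each hash value is reduced modulo `table_size * 8`, split into a byte index and a bit position, and tested against the packed bit array bound to the texture. The host-side sketch below is illustrative only and is not part of either file; the table size and the choice of exactly two hashes (RS and JS) are assumptions made for brevity.

// bloom_sketch.cpp -- minimal host-side Bloom-filter sketch (illustrative only).
#include <cstdio>
#include <string>
#include <vector>

static unsigned int rs_hash(const char* s, size_t len) {
    unsigned int b = 378551, a = 63689, h = 0;
    for (size_t i = 0; i < len; ++i) { h = h * a + (unsigned char)s[i]; a *= b; }
    return h;
}
static unsigned int js_hash(const char* s, size_t len) {
    unsigned int h = 1315423911u;
    for (size_t i = 0; i < len; ++i) h ^= ((h << 5) + (unsigned char)s[i] + (h >> 2));
    return h;
}

struct Bloom {
    std::vector<unsigned char> bits;                 // table_size bytes = 8 * table_size bits
    explicit Bloom(size_t bytes) : bits(bytes, 0) {}
    void set(unsigned int h) {
        size_t pos = h % (bits.size() * 8);          // same reduction as in the device code
        bits[pos / 8] |= (unsigned char)(1u << (pos % 8));
    }
    bool test(unsigned int h) const {
        size_t pos = h % (bits.size() * 8);
        return (bits[pos / 8] >> (pos % 8)) & 1u;
    }
    void insert(const std::string& kmer) {
        set(rs_hash(kmer.data(), kmer.size()));
        set(js_hash(kmer.data(), kmer.size()));
    }
    bool query(const std::string& kmer) const {      // no false negatives, rare false positives
        return test(rs_hash(kmer.data(), kmer.size())) &&
               test(js_hash(kmer.data(), kmer.size()));
    }
};

int main() {
    Bloom filter(1 << 20);                           // 1 MiB bit table (assumed size)
    filter.insert("ACGTACGTACGTACGTACGTA");
    printf("present: %d\n", (int)filter.query("ACGTACGTACGTACGTACGTA"));
    printf("absent : %d\n", (int)filter.query("TTTTTTTTTTTTTTTTTTTTT"));
    return 0;
}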
5590eeeec316c4a1303399656007ec67f611dba5.cu
/* * Copyright 1993-2007 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. Users and possessors of this source code * are hereby granted a nonexclusive, royalty-free license to use this code * in individual and commercial software. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. * * Any use of this source code in individual and commercial software must * include, in the user documentation and internal comments to the code, * the above Disclaimer and U.S. Government End Users Notice. */ /* Template project which demonstrates the basics on how to setup a project * example application. * Device code. 
*/ #ifndef _FIXERRORSVOTING_KERNEL_H_ #define _FIXERRORSVOTING_KERNEL_H_ #include <stdio.h> #include <string.h> #include <ctype.h> #include "FixErrorsVoting.h" texture<unsigned char,1, cudaReadModeElementType> tex; __constant__ unsigned int _char_size_ = 0x08; // 8 bits in 1 char(unsigned) __constant__ unsigned char _bit_mask_[8] = { 0x01, //00000001 0x02, //00000010 0x04, //00000100 0x08, //00001000 0x10, //00010000 0x20, //00100000 0x40, //01000000 0x80 //10000000 }; __device__ char nextNuc[256]; __constant__ char unmasked_nuc[256] = {0, 1, 2, 3, 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0', 'A','\0', 'C','\0','\0', // 69 '\0', 'G', 'H','\0','\0', 'K','\0', 'M', 'N','\0', // 79 '\0','\0', 'R', 'S', 'T','\0','\0', 'W', 'X', 'Y', // 89 '\0','\0','\0','\0','\0','\0','\0', 'A','\0', 'C', // 99 '\0','\0','\0', 'G', 'H','\0','\0', 'K','\0', 'M', // 109 'N', '\0','\0','\0', 'R', 'S', 'T','\0','\0','\0', // 119 'X', '\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 0, 1, 2, 3, 0, 1, // 249 2, 3, 0, 1, 2, 3}; // 255 __constant__ char nuc_char[256] = {'G', 'A', 'C', 'T', 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0', 'A','\0', 'C','\0','\0', // 69 '\0', 'G', 'H','\0','\0', 'K','\0', 'M', 'N','\0', // 79 '\0','\0', 'R', 'S', 'T','\0','\0', 'W','\0', 'Y', // 89 '\0','\0','\0','\0','\0','\0','\0', 'a','\0', 'c', // 99 '\0','\0','\0', 'g', 'h','\0','\0', 'k','\0', 'm', // 109 'n', '\0','\0','\0', 'r', 's', 't','\0','\0','\0', // 119 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 'g', 'a', 'c', 't', 'g', 'a', // 249 'c', 't','g','a','c','t'}; // 255 __constant__ char unmasked_nuc_index[256] = { 
0,1,2,3,4,4,4,4,4,4, // 0 9 4,4,4,4,4,4,4,4,4,4, // 10 19 4,4,4,4,4,4,4,4,4,4, // 20 29 4,4,4,4,4,4,4,4,4,4, // 30 39 4,4,4,4,4,4,4,4,4,4, // 40 49 4,4,4,4,4,4,4,4,4,4, // 50 59 4,4,4,4,4,1,4,2,4,4, // 60 69 4,0,4,4,4,4,4,4,4,4, // 70 79 4,4,4,4,3,4,4,4,4,4, // 80 89 4,4,4,4,4,4,4,1,4,2, // 90 99 4,4,4,0,4,4,4,4,4,4, // 100 109 4,4,4,4,4,4,3,4,4,4, // 110 119 4,4,4,4,4,4,4,4,4,4, // 120 129 4,4,4,4,4,4,4,4,4,4, // 130 139 4,4,4,4,4,4,4,4,4,4, // 140 149 4,4,4,4,4,4,4,4,4,4, // 150 159 4,4,4,4,4,4,4,4,4,4, // 160 169 4,4,4,4,4,4,4,4,4,4, // 170 179 4,4,4,4,4,4,4,4,4,4, // 180 189 4,4,4,4,4,4,4,4,4,4, // 190 199 4,4,4,4,4,4,4,4,4,4, // 200 209 4,4,4,4,4,4,4,4,4,4, // 210 219 4,4,4,4,4,4,4,4,4,4, // 220 229 4,4,4,4,4,4,4,4,4,4, // 230 239 4,4,4,4,0,1,2,3,0,1, // 240 249 2,3,0,1,2,3 }; // 250 255 __constant__ char numeric_nuc_index[256] = { 0,1,2,3,4,4,4,4,4,4, // 0 9 4,4,4,4,4,4,4,4,4,4, // 10 19 4,4,4,4,4,4,4,4,4,4, // 20 29 4,4,4,4,4,4,4,4,4,4, // 30 39 4,4,4,4,4,4,4,4,4,4, // 40 49 4,4,4,4,4,4,4,4,4,4, // 50 59 4,4,4,4,4,1,4,2,4,4, // 60 69 4,0,4,4,4,4,4,4,4,4, // 70 79 4,4,4,4,3,4,4,4,4,4, // 80 89 4,4,4,4,4,4,4,-3,4,-2, // 90 99 4,4,4,-4,4,4,4,4,4,4, // 100 109 4,4,4,4,4,4,-1,4,4,4, // 110 119 4,4,4,4,4,4,4,4,4,4, // 120 129 4,4,4,4,4,4,4,4,4,4, // 130 139 4,4,4,4,4,4,4,4,4,4, // 140 149 4,4,4,4,4,4,4,4,4,4, // 150 159 4,4,4,4,4,4,4,4,4,4, // 160 169 4,4,4,4,4,4,4,4,4,4, // 170 179 4,4,4,4,4,4,4,4,4,4, // 180 189 4,4,4,4,4,4,4,4,4,4, // 190 199 4,4,4,4,4,4,4,4,4,4, // 200 209 4,4,4,4,4,4,4,4,4,4, // 210 219 4,4,4,4,4,4,4,4,4,4, // 220 229 4,4,4,4,4,4,4,4,4,4, // 230 239 4,4,4,4,-12,-11,-10,-9,-8,-7, // 240 249 -6,-5,-4,-3,-2,-1 }; // 250 255 __constant__ unsigned char nucToIndex[256] = {16,16,16,16,16,16,16,16,16,16, // 0 16,16,16,16,16,16,16,16,16,16, // 10 16,16,16,16,16,16,16,16,16,16, // 20 16,16,16,16,16,16,16,16,16,16, // 30 16,16,16,16,16,16,16,16,16,16, // 40 16,16,16,16,16,16,16,16,16,16, // 50 16,16,16,16,16,1,12,2,14,16, // 60 16,0,11,16,16,116,16,9,4,16, // 70 16,16,5,8,3,16,13,7,15,6, // 80 16,16,16,16,16,16,16,253,12,254, // 90 14,16,16,252,11,8,16,116,16,9, // 100 4,16,16,16,5,16,255,16,13,7, // 110 15,6,16,16,16,16,16,16,16,16, // 120 16,16,16,16,16,16,16,16,16,16, // 130 16,16,16,16,16,16,16,16,16,16, // 140 16,16,16,16,16,16,16,16,16,16, // 150 16,16,16,16,16,16,16,16,16,16, // 160 16,16,16,16,16,16,16,16,16,16, // 170 16,16,16,16,16,16,16,16,16,16, // 180 16,16,16,16,16,16,16,16,16,16, // 190 16,16,16,16,16,16,16,16,16,16, // 200 16,16,16,16,16,16,16,16,16,16, // 210 16,16,16,16,16,16,16,16,16,16, // 220 16,16,16,16,16,16,16,16,16,16, // 230 16,16,16,16,16,16,16,16,16,16, // 240 16,16,16,16,16,16}; // 250 __constant__ char indexToNuc[256] = {'G', 'A', 'C', 'T', 'N', 'R', 'Y', 'W', 'S', 'M', // 9 'K', 'H', 'B', 'V', 'D', 'X', '\0','\0','\0','\0', // 19 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 29 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 39 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 49 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 59 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 69 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 79 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 89 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 99 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 109 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 119 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 129 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 139 
'\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 149 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 159 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 169 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 179 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 189 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 199 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 209 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 219 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 229 '\0','\0','\0','\0','\0','\0','\0','\0','\0','\0', // 239 '\0','\0','\0','\0', 'g', 'a', 'c', 't', 'g', 'a', // 249 'c', 't','g','a','c','t'}; // 255 __device__ int _toupper_(int ch) { if ((unsigned int)(ch - 'a') < 26u ) ch += 'A' - 'a'; return ch; } __device__ char * _strcpy_(char *s1, char *s2) { char *os1; os1 = s1; while (*s1++ = *s2++) ; return(os1); } __device__ char * _strncpy_(char *dst, const char *src,register size_t n) { if (n != 0) { register char *d = dst; register const char *s = src; do { if ((*d++ = *s++) == 0) { /* NUL pad the remaining n-1 bytes */ while (--n != 0) *d++ = 0; break; } } while (--n != 0); } return (dst); } //Check each char inside this read, only "A/C/T/G" allowed in the fasta file __device__ int PrepareSequence(char *read) { int p; int return_value = 1; for (p = 0; p < READ_LENGTH; p++ ) { read[p] = _toupper_(read[p]); if (!(read[p] == 'A' || read[p] == 'C' || read[p] == 'T' || read[p] == 'G')) { return_value = 0; break; } } return return_value; } //Check whether bloom filter contains "string key" __device__ bool contains(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; unsigned int b = 378551; unsigned int a = 63689; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_RSHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = hash * a + (str[i]); a = a * b; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_JSHash_ hash = 1315423911; i=0; for(i = 0; i < len; i++) { hash ^= ((hash << 5) + (str[i]) + (hash >> 2)); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_PJWHash_ unsigned int ThreeQuarters = (unsigned int)(((unsigned int)(sizeof(unsigned int) * 8) * 3) / 4); unsigned int HighBits = (unsigned int)(0xFFFFFFFF) << (sizeof(unsigned int) * 7); hash= 0; a= 0; i= 0; for(i = 0; i < len; i++) { hash = (hash << sizeof(unsigned int)) + (str[i]); if((a = hash & HighBits) != 0) { hash = (( hash ^ (a >> ThreeQuarters)) & (~HighBits)); } } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_ELFHash_ hash=0;i=0;a=0; for(i = 0; i < len; i++) { hash = (hash << 4) + (str[i]); if((a = hash & 0xF0000000L) != 0) { hash ^= (a >> 24); } hash &= ~a; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); ; if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_BKDRHash_ hash=0;i=0;a=131; for(i = 0; i < len; i++) { hash = (hash * a) + (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / 
_char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_SDBMHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = (str[i]) + (hash << 6) + (hash << 16) - hash; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_DJBHash_ hash = 5381;i=0; for(i = 0; i < len; i++) { hash = ((hash << 5) + hash) + (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } return true; } __device__ bool contains2(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_DEKHash_ hash = len;i=0; for(i = 0; i < len; i++) { hash = ((hash << 5) ^ (hash >> 27)) ^ (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } /* //_BPHash_ hash=0;i=0; for(i = 0; i < len; i++) { hash = hash << 7 ^ (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_FNVHash_ a = 0x811C9DC5; hash= 0; i= 0; for(i = 0; i < len; i++) { hash *= a; hash ^= (str[i]); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_APHash_ hash = 0xAAAAAAAA;i=0; for(i = 0; i < len; i++) { hash ^= ((i & 1) == 0) ? 
( (hash << 7) ^ (str[i]) * (hash >> 3)) : (~((hash << 11) + (str[i]) ^ (hash >> 5))); } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } */ return true; } __device__ bool contains3(char *key, unsigned int table_size) { unsigned int hash, bit, index,len; unsigned char bloom; unsigned int i; len = TUPLE_SIZE; char str[TUPLE_SIZE+1]; _strncpy_(str, key,TUPLE_SIZE); str[TUPLE_SIZE]=0; //_krHash_ hash = 0; for(i = 0; i < len; i++) { hash += str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_ocaml_hash_ hash=0;i=0; for (i=0; i<len; i++) { hash = hash*19 + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_sml_hash_ hash=0;i=0; for (i=0; i<len; i++) { hash = 33*hash + 720 + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } //_stl_hash_ _strncpy_(str, key,TUPLE_SIZE); hash=0;i=0; for (i=0; i<len; i++) { hash = 5*hash + str[i]; } hash = hash % (table_size * _char_size_); bit = hash % _char_size_; index = hash / _char_size_ ; bloom = tex1Dfetch( tex, index); if ((bloom & _bit_mask_[bit]) != _bit_mask_[bit]) { return false; } return true; } //search tuple from bloon filter __device__ int lstspct_FindTuple(char *tuple, int numTuples) { //check whether in the bloom filter //if(contains(tuple,numTuples * 4)&&contains2(tuple,numTuples * 4)&&contains3(tuple,numTuples * 4)) if(contains(tuple,numTuples * BLOOM_SIZE)&&contains2(tuple,numTuples * BLOOM_SIZE)) return 1; else return -1; } __device__ int d_strTpl_Valid(char *st) { int i; int return_value = 1; if (st == NULL) return_value = 0; else { for (i = 0; i < TUPLE_SIZE; i++) { if (numeric_nuc_index[st[i]] >= 4) { return_value = 0; break; } } } return return_value; } //check whether the read is solid or not, by examine each tuple in this read, whether can be found or not in //all the string tuple list __device__ int CheckSolid(char *seq, int tupleSize, int numTuples){ int p; char tuple[TUPLE_SIZE+1]; int return_value = 1; for (p = 0; p < READ_LENGTH - tupleSize +1; p++ ){ _strncpy_(tuple, (char*) &seq[p],tupleSize); tuple[tupleSize] = 0; if (lstspct_FindTuple(tuple,numTuples) == -1) { return_value = 0; break; } } return return_value; } __device__ int SolidSubsequence(char *seq, int tupleSize, int &seqStart, int &seqEnd, int numTuples) { int i; int solidSubsequence = 1; //char tempTuple[TUPLE_SIZE+1]; char *tempTuple; for (i = seqStart; i < seqEnd - tupleSize + 1; i++) { //_strncpy_(tempTuple , (char*) &seq[i],tupleSize); //tempTuple[tupleSize] = 0; tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple,numTuples) == -1) { solidSubsequence = 0; break; } } return solidSubsequence; } __device__ int TrimSequence(char *seq, int tupleSize, int &seqStart, int &seqEnd, int numTuples,int maxTrim) { int i; seqStart = 0; int flag = 1; //char tempTuple[TUPLE_SIZE+1]; char *tempTuple; //get length of this read int len = seq[READ_LENGTH + 1]; for (i = 0; i < len - tupleSize + 1; i++ ) { //_strncpy_(tempTuple , &seq[i],tupleSize); //tempTuple[tupleSize] = 0; 
tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple,numTuples) != -1) { break; } // Not solid yet, advance seqStart++; } seqEnd = len; for (i = seqStart + 1; i < len - tupleSize + 1; i++ ) { //_strncpy_(tempTuple , &seq[i],tupleSize); //tempTuple[tupleSize] = 0; tempTuple = &seq[i]; if (lstspct_FindTuple(tempTuple, numTuples) == -1) { break; } } if (i == len - tupleSize) // The sequence is not trimmed. seqEnd = len - 1; else // The sequence is trimmed. Trim end is the index of the first // 'bad' nucleotide. Since seqStart is the index of the first // 'good' nucleotide, seqEnd - seqStart is the length of the // untrimmed seq. In other words, it's half open 0 based // indexing. seqEnd = i + tupleSize-1; if (seqStart > maxTrim) // return 0; flag = 0; else if (len - seqEnd > maxTrim) //return 0; flag = 0; else if(SolidSubsequence(seq, tupleSize, seqStart, seqEnd,numTuples) == 0) // return 0; flag = 0; else { int newLength = seqEnd - seqStart + 1; for (int s = 0; s < newLength; s++ ) { seq[s] = seq[s + seqStart]; } //seq.length = newLength -1; len = newLength -1; } //save the new length //_strncpy_(&seq[READ_LENGTH + 1], itoa1(len),4); //itoa1(len,&seq[READ_LENGTH + 1]); seq[READ_LENGTH + 1] = len; return flag; } //////////////////////////////////////////////////////////////////////////////// //! Fix two errors, step 1 kernel function //! @param d_reads_arr input data in global memory //! @param d_param input data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void fix_errors1(char *d_reads_arr,Param *d_param) { short numSearch=1; nextNuc['G'] = 'A'; nextNuc['A'] = 'C'; nextNuc['C'] = 'T'; nextNuc['T'] = 'G'; int c_tid = blockIdx.x * blockDim.x + threadIdx.x; int round = 0; int total_thread = BLOCK * THREAD; int discardSeq=0; int trimStart=0, trimEnd=0; int chunk_bound = (total_thread < MAX_READS_BOUND ? total_thread:MAX_READS_BOUND); round = d_param->NUM_OF_READS/chunk_bound + (d_param->NUM_OF_READS%chunk_bound == 0 ? 0:1); int maxPos[READ_LENGTH * 4],maxMod[READ_LENGTH * 4]; unsigned char votes[READ_LENGTH][4],mutNuc, mutNuc2, prev, cur; int solid[READ_LENGTH]; int s,i,j,m,n,startPos, fixPos=-1,numFixed = 0,numChanges=0; short return_value = 0,flag = 0,flag1=1; // Cast votes for mutations int p,vp,mut; short numAboveThreshold = 0,newLength,len; short maxVotes = 0,allGood = 1; int numTies = -1,pindex = 0, mod, pos,current_read_idx; char *tempTuple, *read; for(i=0;i<round;i++) { flag = 0;flag1=1;numFixed = 0; numChanges=0; return_value = 0;discardSeq = 0; current_read_idx = c_tid + chunk_bound * i; //check if run out of reads current_read_idx = (current_read_idx > d_param->NUM_OF_READS ? 
0:current_read_idx); //take 1 read per thread read = &d_reads_arr[current_read_idx*(READ_LENGTH + 2)]; //get length of this read len = read[READ_LENGTH + 1]; if (!PrepareSequence(read)) { discardSeq = 1; } else { numFixed = 0; fixPos = -1; do{ if(flag) break; else{ if (fixPos > 0) startPos = fixPos; else startPos = 0; for (m = 0; m < READ_LENGTH; m++) { for (int n = 0; n < 4; n++) //votes[threadIdx.x][m][n] = 0; votes[m][n] = 0; } for(m=0;m<READ_LENGTH;m++) solid[m] = 0; for (p = startPos; p < len - d_param->tupleSize + 1; p++ ){ tempTuple = &read[p]; if (d_strTpl_Valid(tempTuple)){ if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) solid[p] = 1; else{ for (vp = 0; vp < d_param->tupleSize; vp++){ mutNuc = nextNuc[read[p + vp]]; read[p + vp] = mutNuc; for (mut = 0; mut < 3; mut++ ){ tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) votes[vp + p][unmasked_nuc_index[mutNuc]]++; mutNuc = nextNuc[mutNuc]; read[p + vp] = mutNuc; } } } } } ////////////////vote completed////////////////////// ++numFixed; //////////////////////fix sequence based on voting in previous step////////////// fixPos = 0;numAboveThreshold = 0;maxVotes = 0;allGood = 1; for (p = 0; p < len - d_param->tupleSize + 1; p++ ) { if (solid[p] == 0) { allGood = 0;break; } } if (allGood) // no need to fix this sequence return_value = 1; else { for (p = 0; p < len; p++){ for (m = 0; m < 4; m++){ if (votes[p][m] > d_param->minVotes) numAboveThreshold++; if (votes[p][m] >= maxVotes) maxVotes = votes[p][m]; } } pindex = 0;numTies = -1; // Make sure there aren't multiple possible fixes for (p = 0; p < len; p++){ for (m = 0; m < 4; m++){ if (votes[p][m] == maxVotes){ numTies++; maxPos[pindex] = p; maxMod[pindex] = m; pindex++; } } } if (numAboveThreshold > 0 ){ if (numTies < numSearch || (pindex > 1 && maxPos[0] != maxPos[1])){ // Found at least one change to the sequence for (s = 0; s < numSearch && s < pindex; s++) { mod = maxMod[s]; pos = maxPos[s]; fixPos = pos; if (mod < 4){ prev = read[pos]; cur = nuc_char[mod]; read[pos] = cur; } } if( CheckSolid(read,d_param->tupleSize,d_param->numTuples)) return_value = 1; else{ //reset return_value = 0; //read[pos] = prev; } } else { return_value = 0; } } else { return_value = 0; } } //check fix sequence return if( return_value) { flag = 1; numChanges = numFixed; break; } } } while (fixPos > 0); /////////////////////////end of solidify//////// if (numChanges != 0){ if (numChanges > d_param->maxMods) discardSeq = 1; else discardSeq = 0; } else{ if( d_param->numSearch == 2){ //removed trim in fix error1 discardSeq = 1; } else { // Find the locations of the first solid positions. if (d_param->doTrim) { if(TrimSequence(read, d_param->tupleSize,trimStart, trimEnd, d_param->numTuples,d_param->maxTrim)){ // If there is space for one solid tuple (trimStart < trimEnd - ts+1) // and the subsequence between the trimmed ends is ok, print the // trimmed coordinates. discardSeq = 0; } else discardSeq = 1; } else discardSeq = 1; } } } if (discardSeq) { read[READ_LENGTH] = 'D'; //F fixed, D: not fixed, discard } else { read[READ_LENGTH] = 'F'; //F fixed, D: not fixed, discard } __syncthreads(); } } //////////////////////////////////////////////////////////////////////////////// //! Fix two errors step 2 kernel function //! @param d_reads_arr input data in global memory //! 
@param d_param input data in global memory //////////////////////////////////////////////////////////////////////////////// __global__ void fix_errors2(char *d_reads_arr,Param *d_param, int numReads) { short numSearch = 2; nextNuc['G'] = 'A'; nextNuc['A'] = 'C'; nextNuc['C'] = 'T'; nextNuc['T'] = 'G'; int c_tid = blockIdx.x * blockDim.x + threadIdx.x; int round = 0; int total_thread = BLOCK * THREAD; int discardSeq=0; int trimStart=0, trimEnd=0; int chunk_bound = (total_thread < MAX_READS_BOUND ? total_thread:MAX_READS_BOUND); round = numReads/chunk_bound + (numReads%chunk_bound == 0 ? 0:1); int maxPos[READ_LENGTH * 4]; int maxMod[READ_LENGTH * 4]; unsigned char votes[READ_LENGTH][4],mutNuc, mutNuc2, prev, cur; int solid[READ_LENGTH]; //__shared__ unsigned char solid[READ_LENGTH]; int s,i,j,m,n; int startPos, fixPos=-1; int numFixed = 0,numChanges=0; short return_value = 0,flag = 0,flag1=1; // Cast votes for mutations int p,vp,mut; short numAboveThreshold = 0; short maxVotes = 0,allGood = 1; int numTies = -1; int pindex = 0; int mod, pos; short newLength,len; int current_read_idx; char *tempTuple, *read; /* Since GPU cannot process all reads at the same time (limited block NO.), the reads are divided into several rounds to process. */ for(i=0;i<round;i++) { flag = 0; flag1=1;numFixed = 0;numChanges=0;return_value = 0; current_read_idx = c_tid + chunk_bound * i; //check if run out of reads current_read_idx = (current_read_idx > numReads ? 0:current_read_idx); //take 1 read per thread read = &d_reads_arr[current_read_idx*(READ_LENGTH + 2)]; //get length of this read len = read[READ_LENGTH + 1]; discardSeq = 0; if (!PrepareSequence(read)) discardSeq = 1; else { numFixed = 0; fixPos = -1; do { if(flag) break; else{ if (fixPos > 0) startPos = fixPos; else startPos = 0; for (m = 0; m < READ_LENGTH; m++) { for (int n = 0; n < 4; n++) votes[m][n] = 0; } for(m=0;m<READ_LENGTH;m++) solid[m] = 0; for (p = startPos; p < len - d_param->tupleSize + 1; p++ ) { tempTuple = &read[p]; if (d_strTpl_Valid(tempTuple)) { if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) solid[p] = 1; else{ for (vp = 0; vp < d_param->tupleSize-1; vp++) { mutNuc = nextNuc[read[p + vp]]; read[p + vp] = mutNuc; for (mut = 0; mut < 3; mut++ ) { tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) { votes[vp + p][unmasked_nuc_index[mutNuc]]++; } //delta = 2 for(m=vp+1;m<d_param->tupleSize;m++) { mutNuc2 = nextNuc[read[p + m]]; read[p + m] = mutNuc2; for(n=0;n<3;n++) { tempTuple = &read[p]; if (lstspct_FindTuple(tempTuple, d_param->numTuples) != -1) { votes[vp + p][unmasked_nuc_index[mutNuc]]++;//history votes[m + p][unmasked_nuc_index[mutNuc2]]++; } mutNuc2 = nextNuc[mutNuc2]; read[p + m] = mutNuc2; } } mutNuc = nextNuc[mutNuc]; read[p + vp] = mutNuc; } } } } } ++numFixed; //fix sequence based on voting in previous step fixPos = 0;numAboveThreshold = 0;maxVotes = 0;allGood = 1; for (p = 0; p < len - d_param->tupleSize + 1; p++ ) { if (solid[p] == 0) { allGood = 0; break; } } if (allGood) // no need to fix this sequence return_value = 1; else{ for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] > d_param->minVotes) numAboveThreshold++; if (votes[p][m] >= maxVotes) maxVotes = votes[p][m]; } } pindex = 0;numTies = -1; for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] == maxVotes) { numTies++; maxPos[pindex] = p; maxMod[pindex] = m; pindex++; } } } //second votes[p][m] = 0; maxVotes = 0; for (p = 0; p < len ; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] >= 
maxVotes) maxVotes = votes[p][m]; } } for (p = 0; p < len; p++){ for (m = 0; m < 4; m++) { if (votes[p][m] == maxVotes) { maxPos[pindex] = p; maxMod[pindex] = m; //pindex++; } } } __syncthreads(); if (numAboveThreshold > 0 ) { //if (numTies < numSearch || (pindex > 1 && maxPos[0] != maxPos[1])){ // Found at least one change to the sequence for (s = 0; s < 2; s++) { mod = maxMod[s]; pos = maxPos[s]; fixPos = pos; if (mod < 4){ prev = read[pos]; cur = nuc_char[mod]; read[pos] = nuc_char[mod]; } } return_value = CheckSolid(read,d_param->tupleSize,d_param->numTuples); //} //else { return_value = 0; //} } else { return_value = 0; } __syncthreads(); } //check fix sequence return if( return_value){ flag = 1; numChanges = numFixed; break; } }//if flag } while (fixPos > 0); /////////////////////////end of solidify//////// if (numChanges != 0) { if (numChanges > d_param->maxMods){ discardSeq = 1; //_strncpy_(read , original, READ_LENGTH + 2); } else { discardSeq = 0; } } else { // Find the locations of the first solid positions. if (d_param->doTrim){ if(TrimSequence(read, d_param->tupleSize,trimStart, trimEnd, d_param->numTuples,d_param->maxTrim)){ // If there is space for one solid tuple (trimStart < trimEnd - ts+1) // and the subsequence between the trimmed ends is ok, print the // trimmed coordinates. discardSeq = 0; } else discardSeq = 1; } else { discardSeq = 1; } } } if (discardSeq) { read[READ_LENGTH] = 'D'; //last char for indicator } else { read[READ_LENGTH] = 'F'; //F fixed, D: not fixed, discard } __syncthreads(); } } #endif // #ifndef _FIXERRORSVOTING_KERNEL_H_
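Both `fix_errors1` and `fix_errors2` above assign one read per thread and loop over `round` chunks because the grid cannot cover all reads at once; each read occupies `READ_LENGTH + 2` bytes, with the last two bytes holding a status character ('F'/'D') and the length. The sketch below is illustrative only: `READ_LEN` and the launch shape are assumptions, and it expresses the same per-thread coverage as a conventional grid-stride loop rather than the `chunk_bound`/`round` arithmetic used above.

// chunked_reads_sketch.cu -- per-thread read indexing over a packed read array (illustrative).
#include <cstdio>
#include <cuda_runtime.h>

#define READ_LEN 35                 // assumed read length; the real code uses READ_LENGTH
#define STRIDE   (READ_LEN + 2)     // +1 status char ('F'/'D'), +1 length byte

__global__ void tag_reads(char* reads, int numReads)
{
    int tid          = blockIdx.x * blockDim.x + threadIdx.x;
    int totalThreads = gridDim.x * blockDim.x;
    // Each thread walks the read array in steps of totalThreads, mirroring the
    // round/chunk_bound loop in fix_errors1/fix_errors2.
    for (int r = tid; r < numReads; r += totalThreads) {
        char* read = &reads[r * STRIDE];
        read[READ_LEN] = 'F';       // status byte, as written by the kernels above
    }
}

int main()
{
    const int numReads = 1024;
    char* d_reads = nullptr;
    cudaMalloc(&d_reads, numReads * STRIDE);
    cudaMemset(d_reads, 'A', numReads * STRIDE);
    tag_reads<<<8, 128>>>(d_reads, numReads);
    cudaDeviceSynchronize();
    char probe[STRIDE];
    cudaMemcpy(probe, d_reads, STRIDE, cudaMemcpyDeviceToHost);
    printf("status byte of read 0: %c\n", probe[READ_LEN]);
    cudaFree(d_reads);
    return 0;
}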
6ec4ff927f1059f4ebe6c2293b48a931bacbaf6c.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "bfs_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Node *d_graph_nodes = NULL; hipMalloc(&d_graph_nodes, XSIZE*YSIZE); int *d_edge_list = NULL; hipMalloc(&d_edge_list, XSIZE*YSIZE); bool *d_graph_level = NULL; hipMalloc(&d_graph_level, XSIZE*YSIZE); bool *d_graph_visited = NULL; hipMalloc(&d_graph_visited, XSIZE*YSIZE); int *d_cost = NULL; hipMalloc(&d_cost, XSIZE*YSIZE); bool *loop = NULL; hipMalloc(&loop, XSIZE*YSIZE); int no_of_nodes = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( bfs_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( bfs_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( bfs_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
6ec4ff927f1059f4ebe6c2293b48a931bacbaf6c.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "bfs_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; Node *d_graph_nodes = NULL; cudaMalloc(&d_graph_nodes, XSIZE*YSIZE); int *d_edge_list = NULL; cudaMalloc(&d_edge_list, XSIZE*YSIZE); bool *d_graph_level = NULL; cudaMalloc(&d_graph_level, XSIZE*YSIZE); bool *d_graph_visited = NULL; cudaMalloc(&d_graph_visited, XSIZE*YSIZE); int *d_cost = NULL; cudaMalloc(&d_cost, XSIZE*YSIZE); bool *loop = NULL; cudaMalloc(&loop, XSIZE*YSIZE); int no_of_nodes = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); bfs_kernel<<<gridBlock,threadBlock>>>(d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { bfs_kernel<<<gridBlock,threadBlock>>>(d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { bfs_kernel<<<gridBlock,threadBlock>>>(d_graph_nodes,d_edge_list,d_graph_level,d_graph_visited,d_cost,loop,no_of_nodes); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
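The benchmark above warms up with 10 launches and then times 1000 launches with `steady_clock`, but it takes the end timestamp without synchronizing, so queued kernel work may still be executing when the clock is read. A common alternative, sketched below, is to bracket the loop with CUDA events and synchronize on the stop event before reading the elapsed time; the dummy kernel and sizes here are placeholders, not taken from the files.

// event_timing_sketch.cu -- CUDA-event timing around a kernel loop (illustrative).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel(float* x, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 2.0f + 1.0f;
}

int main()
{
    const int n = 1 << 20;
    float* d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));
    cudaMemset(d_x, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dim3 block(256), grid((n + 255) / 256);
    for (int i = 0; i < 10; ++i) dummy_kernel<<<grid, block>>>(d_x, n);   // warm-up

    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i) dummy_kernel<<<grid, block>>>(d_x, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);        // wait until all queued launches have finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("total: %.3f ms for 1000 launches\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}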
2fe177efccafb64218516868a68be92b646ce9e2.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <hip/hip_runtime.h> #include <cstdlib> #include <time.h> #include <stdio.h> #include <fstream> using namespace std; __global__ void unfoldkernel(bool* a, bool*mask, bool* c, int n){ int i = blockIdx.x*blockDim.x+ threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if(i<n && j<n) c[i*n+j] = a[i*n+j]&mask[j]; } void print(bool* a, int m, int n){ for(int i=0;i<m;i++){ for(int j=0;j<n;j++) cout << a[i*n+j] << '\t'; cout << endl; } cout << endl; } int main(int argc, char* argv[]){ //Initialisation variables ifstream in; in.open(argv[1]); int n= atoi(argv[2]); int iterations = atoi(argv[3]); //clock variables clock_t start, end; double gpu_time_used, cpu_time_used; //Threads and block configuration dim3 threadsPerBlock(16,16); dim3 numBlocks((n+threadsPerBlock.x-1)/threadsPerBlock.x, (n+threadsPerBlock.y-1)/threadsPerBlock.y); //sizes of matrices int size_mat = sizeof(bool) * n * n; int size_res = size_mat; int size_mask = sizeof(bool)*n; //memory allocation in host machine bool* mat = (bool*)malloc(size_mat); bool* res = (bool*)malloc(size_res); bool* mask = (bool*)malloc(size_mask); //Initializing matrices for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++) in >> mat[i*n+j]; } for(int i = 0; i < n; i++) in >> mask[i]; bool *d_mat, *d_mask, *d_res; start = clock(); //Memory allocation in GPU hipMalloc((void**)&d_mat, size_mat); hipMalloc((void**)&d_mask, size_mask); hipMalloc((void**)&d_res, size_res); //copy data from host to GPU hipMemcpy(d_mat, mat, size_mat, hipMemcpyHostToDevice); hipMemcpy(d_mask, mask, size_mask, hipMemcpyHostToDevice); for(int k=0; k<iterations; k++) hipLaunchKernelGGL(( unfoldkernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, d_mat, d_mask,d_res, n); //copying result hipMemcpy(res, d_res, size_res, hipMemcpyDeviceToHost); end = clock(); //calculating time taken by GPU gpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC; //CPU computation start=clock(); for(int k = 0; k < iterations; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++) res[j] = mat[i*n+j]&mask[j]; } } end = clock(); //calculating time taken by CPU cpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC; cout << "CPU/GPU: " << cpu_time_used/gpu_time_used << endl; return 0; }
2fe177efccafb64218516868a68be92b646ce9e2.cu
#include <iostream> #include <cuda.h> #include <cstdlib> #include <time.h> #include <stdio.h> #include <fstream> using namespace std; __global__ void unfoldkernel(bool* a, bool*mask, bool* c, int n){ int i = blockIdx.x*blockDim.x+ threadIdx.x; int j = blockIdx.y*blockDim.y + threadIdx.y; if(i<n && j<n) c[i*n+j] = a[i*n+j]&mask[j]; } void print(bool* a, int m, int n){ for(int i=0;i<m;i++){ for(int j=0;j<n;j++) cout << a[i*n+j] << '\t'; cout << endl; } cout << endl; } int main(int argc, char* argv[]){ //Initialisation variables ifstream in; in.open(argv[1]); int n= atoi(argv[2]); int iterations = atoi(argv[3]); //clock variables clock_t start, end; double gpu_time_used, cpu_time_used; //Threads and block configuration dim3 threadsPerBlock(16,16); dim3 numBlocks((n+threadsPerBlock.x-1)/threadsPerBlock.x, (n+threadsPerBlock.y-1)/threadsPerBlock.y); //sizes of matrices int size_mat = sizeof(bool) * n * n; int size_res = size_mat; int size_mask = sizeof(bool)*n; //memory allocation in host machine bool* mat = (bool*)malloc(size_mat); bool* res = (bool*)malloc(size_res); bool* mask = (bool*)malloc(size_mask); //Initializing matrices for(int i = 0; i < n; i++){ for(int j = 0; j < n; j++) in >> mat[i*n+j]; } for(int i = 0; i < n; i++) in >> mask[i]; bool *d_mat, *d_mask, *d_res; start = clock(); //Memory allocation in GPU cudaMalloc((void**)&d_mat, size_mat); cudaMalloc((void**)&d_mask, size_mask); cudaMalloc((void**)&d_res, size_res); //copy data from host to GPU cudaMemcpy(d_mat, mat, size_mat, cudaMemcpyHostToDevice); cudaMemcpy(d_mask, mask, size_mask, cudaMemcpyHostToDevice); for(int k=0; k<iterations; k++) unfoldkernel<<<numBlocks, threadsPerBlock>>>(d_mat, d_mask,d_res, n); //copying result cudaMemcpy(res, d_res, size_res, cudaMemcpyDeviceToHost); end = clock(); //calculating time taken by GPU gpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC; //CPU computation start=clock(); for(int k = 0; k < iterations; k++){ for(int j = 0; j < n; j++){ for(int i = 0; i < n; i++) res[j] = mat[i*n+j]&mask[j]; } } end = clock(); //calculating time taken by CPU cpu_time_used = ((double)(end-start))/CLOCKS_PER_SEC; cout << "CPU/GPU: " << cpu_time_used/gpu_time_used << endl; return 0; }
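In both versions of the file above, the CPU loop writes `res[j] = mat[i*n+j] & mask[j]`, which touches only the first `n` elements of `res` and so does not mirror the kernel's per-element `c[i*n+j]` result. A host reference that matches the kernel's indexing might look like the sketch below; it is illustrative only, and the tiny sizes are placeholders.

// unfold_host_sketch.cpp -- host reference matching unfoldkernel's indexing (illustrative).
#include <cstdio>

void unfold_host(const bool* mat, const bool* mask, bool* res, int n)
{
    for (int i = 0; i < n; ++i)                          // row
        for (int j = 0; j < n; ++j)                      // column
            res[i * n + j] = mat[i * n + j] & mask[j];   // mirrors c[i*n+j] = a[i*n+j] & mask[j]
}

int main()
{
    const int n = 4;
    bool mat[16], mask[4], res[16];
    for (int k = 0; k < 16; ++k) mat[k] = (k % 3) != 0;
    for (int j = 0; j < 4; ++j)  mask[j] = (j % 2) == 0;
    unfold_host(mat, mask, res, n);
    printf("res[5] = %d\n", (int)res[5]);
    return 0;
}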
37eb82ef25585d15fca6962918445d14c58fcd94.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdio> #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ hipError_t e = hipGetLastError(); \ if (hipSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ hipGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif // kernel program for the device (GPU): compiled by NVCC __global__ void addKernel(int* c, const int* a, const int* b) { int x = threadIdx.x; int y = threadIdx.y; int i = y * (blockDim.x) + x; // [y][x] = y * WIDTH + x; c[i] = a[i] + b[i]; } // main program for the CPU: compiled by MS-VC++ int main(void) { // host-side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = { 0 }; // make a, b matrices for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { a[y][x] = y * 10 + x; b[y][x] = (y * 10 + x) * 100; } } // device-side data int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; // allocate device memory CUDA_CHECK( hipMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int)) ); CUDA_CHECK( hipMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int)) ); CUDA_CHECK( hipMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int)) ); // copy from host to device CUDA_CHECK( hipMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice) ); CUDA_CHECK( hipMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), hipMemcpyHostToDevice) ); // launch a kernel on the GPU with one thread for each element. dim3 dimBlock(WIDTH, WIDTH, 1); // x, y, z hipLaunchKernelGGL(( addKernel) , dim3(1), dim3(dimBlock), 0, 0, dev_c, dev_a, dev_b); // dev_c = dev_a + dev_b; CUDA_CHECK( hipPeekAtLastError() ); // copy from device to host CUDA_CHECK( hipMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int), hipMemcpyDeviceToHost) ); // free device memory CUDA_CHECK( hipFree(dev_c) ); CUDA_CHECK( hipFree(dev_a) ); CUDA_CHECK( hipFree(dev_b) ); // print the result for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { printf("%5d", c[y][x]); } printf("\n"); } // done return 0; }
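The `CUDA_CHECK` macro in this file executes the wrapped call and then re-queries the thread-local last-error state. An alternative pattern, sketched below purely for illustration (not a correction of the file), captures the `cudaError_t` returned by the call itself, so the check does not depend on `cudaGetLastError()`.

// cuda_try_sketch.cu -- status-returning error check (illustrative alternative).
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_TRY(call)                                                       \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error \"%s\" at %s:%d\n",                  \
                    cudaGetErrorString(err_), __FILE__, __LINE__);           \
            exit(1);                                                         \
        }                                                                    \
    } while (0)

int main()
{
    int* d_p = nullptr;
    CUDA_TRY(cudaMalloc(&d_p, 256 * sizeof(int)));
    CUDA_TRY(cudaMemset(d_p, 0, 256 * sizeof(int)));
    CUDA_TRY(cudaFree(d_p));
    printf("ok\n");
    return 0;
}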
37eb82ef25585d15fca6962918445d14c58fcd94.cu
#include <cstdio> #if defined(NDEBUG) #define CUDA_CHECK(x) (x) #else #define CUDA_CHECK(x) do {\ (x); \ cudaError_t e = cudaGetLastError(); \ if (cudaSuccess != e) { \ printf("cuda failure \"%s\" at %s:%d\n", \ cudaGetErrorString(e), \ __FILE__, __LINE__); \ exit(1); \ } \ } while (0) #endif // kernel program for the device (GPU): compiled by NVCC __global__ void addKernel(int* c, const int* a, const int* b) { int x = threadIdx.x; int y = threadIdx.y; int i = y * (blockDim.x) + x; // [y][x] = y * WIDTH + x; c[i] = a[i] + b[i]; } // main program for the CPU: compiled by MS-VC++ int main(void) { // host-side data const int WIDTH = 5; int a[WIDTH][WIDTH]; int b[WIDTH][WIDTH]; int c[WIDTH][WIDTH] = { 0 }; // make a, b matrices for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { a[y][x] = y * 10 + x; b[y][x] = (y * 10 + x) * 100; } } // device-side data int* dev_a = 0; int* dev_b = 0; int* dev_c = 0; // allocate device memory CUDA_CHECK( cudaMalloc((void**)&dev_a, WIDTH * WIDTH * sizeof(int)) ); CUDA_CHECK( cudaMalloc((void**)&dev_b, WIDTH * WIDTH * sizeof(int)) ); CUDA_CHECK( cudaMalloc((void**)&dev_c, WIDTH * WIDTH * sizeof(int)) ); // copy from host to device CUDA_CHECK( cudaMemcpy(dev_a, a, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice) ); CUDA_CHECK( cudaMemcpy(dev_b, b, WIDTH * WIDTH * sizeof(int), cudaMemcpyHostToDevice) ); // launch a kernel on the GPU with one thread for each element. dim3 dimBlock(WIDTH, WIDTH, 1); // x, y, z addKernel <<< 1, dimBlock>>>(dev_c, dev_a, dev_b); // dev_c = dev_a + dev_b; CUDA_CHECK( cudaPeekAtLastError() ); // copy from device to host CUDA_CHECK( cudaMemcpy(c, dev_c, WIDTH * WIDTH * sizeof(int), cudaMemcpyDeviceToHost) ); // free device memory CUDA_CHECK( cudaFree(dev_c) ); CUDA_CHECK( cudaFree(dev_a) ); CUDA_CHECK( cudaFree(dev_b) ); // print the result for (int y = 0; y < WIDTH; ++y) { for (int x = 0; x < WIDTH; ++x) { printf("%5d", c[y][x]); } printf("\n"); } // done return 0; }
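The example above launches a single `WIDTH x WIDTH` block, so no bounds check is needed. The sketch below, illustrative only and not taken from the files, extends the same row-major indexing to multiple blocks, where a guard on `x < width && y < height` becomes necessary once the matrix is not a multiple of the block size.

// add_kernel_grid_sketch.cu -- multi-block variant of the single-block addKernel (illustrative).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void addKernel2D(int* c, const int* a, const int* b, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        int i = y * width + x;        // row-major flattening, as in the single-block version
        c[i] = a[i] + b[i];
    }
}

int main()
{
    const int W = 37, H = 23;         // deliberately not multiples of the block size
    const size_t bytes = W * H * sizeof(int);
    int *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes); cudaMalloc(&d_b, bytes); cudaMalloc(&d_c, bytes);
    cudaMemset(d_a, 0, bytes); cudaMemset(d_b, 0, bytes);

    dim3 block(16, 16);
    dim3 grid((W + block.x - 1) / block.x, (H + block.y - 1) / block.y);
    addKernel2D<<<grid, block>>>(d_c, d_a, d_b, W, H);
    cudaDeviceSynchronize();
    printf("launched %u x %u blocks\n", grid.x, grid.y);

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}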
935408553a900627d8e8a500573fac1e91de2042.hip
// !!! This is a file automatically generated by hipify!!! //jacobi7.cu #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <cuda_call.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include "getopt.h" #include "include/jacobi7_cuda.h" #include "include/jacobi7.h" //#ifndef TIME_TILE_SIZE //#warning TIME_TILE_SIZE is not set, defaulting to 1 //#define TIME_TILE_SIZE 2 //#endif // Timer function double rtclock(){ struct timeval tp; gettimeofday(&tp, NULL); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main(int argc, char* *argv){ if(argc != 8) { printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TZ> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int nx = atoi(argv[1]); const int ny = atoi(argv[2]); const int nz = atoi(argv[3]); const int tx = atoi(argv[4]); const int ty = atoi(argv[5]); const int tz = atoi(argv[6]); const int timesteps = atoi(argv[7]); const int xyz = nx * ny * nz; const int xyz_byetes = xyz * sizeof(float); float *h_dA; float *h_dB; float *d_dA; float *d_dB; float *h_dA1; float *h_dB1; // Allocate host buffers h_dA = (float*) malloc(xyz_byetes); h_dB = (float*) malloc(xyz_byetes); h_dA1 = (float*) malloc(xyz_byetes); h_dB1 = (float*) malloc(xyz_byetes); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_dA[i] = 1 + (float)rand() / (float)RAND_MAX; h_dB[i] = h_dA[i]; h_dA1[i] = h_dA[i]; h_dB1[i] = h_dA[i]; } printf("Start computing..."); printf("h_dB[%d]:%f\n", 2+32*(3+32*4), h_dB[2+32*(3+32*4)]); printf("h_dA[%d]:%f\n", 2+32*(3+32*4), h_dA[2+32*(3+32*4)]); float *B = 0; const int ldb = 0; const int ldc = 0; // Always use device 0 hipSetDevice(0); /* set the ratio of cache/shared memory hipFuncCachePreferNone: Default function cache configuration, no preference hipFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache hipFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory */ CHECK_CALL(hipDeviceSetCacheConfig(hipFuncCachePreferShared)); // Allocate device buffers CHECK_CALL(hipMalloc((void**)&d_dA, xyz_byetes)); CHECK_CALL(hipMalloc((void**)&d_dB, xyz_byetes)); // Copy to device CHECK_CALL(hipMemcpy(d_dA, h_dA, xyz_byetes, hipMemcpyHostToDevice)); //CHECK_CALL(hipMemcpy(d_dB, h_dB, xyz_byetes, hipMemcpyHostToDevice)); CHECK_CALL(hipMemcpy(d_dB, d_dA, xyz_byetes, hipMemcpyDeviceToDevice)); // Setup the kernel float* input = d_dA; float* output = d_dB; dim3 grid(nx/tx, ny/ty, nz/tz); dim3 block(tx + 2*(TIME_TILE_SIZE-1), ty + 2*(TIME_TILE_SIZE-1), tz + 2*(TIME_TILE_SIZE-1)); float *tmp; float *tmp1; float fac = 6.0/(h_dA[0] * h_dA[0]); const int sharedMemSize = (block.x) * (block.y) * (block.z) * sizeof(float); double startTime = rtclock(); // Run the GPU kernel for(int t = 0; t < timesteps; t += TIME_TILE_SIZE) { hipLaunchKernelGGL(( jacobi3d_7p_overlap), dim3(grid), dim3(block), sharedMemSize, 0, input, output, nx, ny, nz, fac); // swap input and output tmp = input; input = output; output = tmp; } SYNC_DEVICE(); ASSERT_STATE("Kernel"); double endTime = rtclock(); double elapsedTimeG = endTime - startTime; printf("Elapsed Time:%lf\n", elapsedTimeG); double flops = xyz * 7.0 * timesteps; double gflops = flops / elapsedTimeG / 1e9; printf("(GPU) %lf GFlop/s\n", gflops); // Copy the result to main memory CHECK_CALL(hipMemcpy(h_dB, input, xyz_byetes, hipMemcpyDeviceToHost)); // Run the CPU version startTime = rtclock(); for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_dA1, B, ldb, h_dB1, ldc); tmp1 = 
h_dA1; h_dA1 = h_dB1; h_dB1 = tmp1; } endTime = rtclock(); double elapsedTimeC = endTime - startTime; printf("Elapsed Time:%lf\n", elapsedTimeC); flops = xyz * 7.0 * timesteps; gflops = flops / elapsedTimeC / 1e9; printf("(CPU) %lf GFlop/s\n", gflops); // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; for (; i < xyz; ++i){ diff = h_dA1[i] - h_dB[i]; errorNorm += diff * diff; refNorm += h_dA1[i] * h_dA1[i]; /*if (h_dB[i+nx*(j+ny*k)] != h_dA1[i+nx*(j+ny*k)]) diff = 1;*/ } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) { printf("Correct ness, FAILED\n"); } else { printf("Correctness, PASSED\n"); } printf("h_dB[%d]:%f\n", 2+ny*(3+nz*4), h_dB[2+ny*(3+nz*4)]); printf("h_dA[%d]:%f\n", 2+ny*(3+nz*4), h_dA[2+ny*(3+nz*4)]); printf("h_dB1[%d]:%f\n", 2+ny*(3+nz*4), h_dB1[2+ny*(3+nz*4)]); printf("h_dA1[%d]:%f\n", 2+ny*(3+nz*4), h_dA1[2+ny*(3+nz*4)]); printf("-----------------------------------\n"); printf("h_dB[%d]:%f\n", 3+ny*(4+nz*5), h_dB[3+ny*(4+nz*5)]); printf("h_dA[%d]:%f\n", 3+ny*(4+nz*5), h_dA[3+ny*(4+nz*5)]); printf("h_dB1[%d]:%f\n", 3+ny*(4+nz*5), h_dB1[3+ny*(4+nz*5)]); printf("h_dA1[%d]:%f\n", 3+ny*(4+nz*5), h_dA1[3+ny*(4+nz*5)]); // Free buffers free(h_dA); free(h_dB); free(h_dA1); free(h_dB1); CHECK_CALL(hipFree(d_dA)); CHECK_CALL(hipFree(d_dB)); }
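Both the GPU and CPU loops above advance the stencil by swapping the input and output pointers after every step, so the newest data always sits behind `input` when the loop exits. The toy sketch below isolates that double-buffering pattern; the 1-D smoothing kernel and sizes are placeholders, not taken from the files.

// double_buffer_sketch.cu -- pointer-swap double buffering for iterative stencils (illustrative).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void relax(const float* in, float* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n - 1)
        out[i] = (in[i - 1] + in[i] + in[i + 1]) / 3.0f;   // toy 1-D smoothing step
}

int main()
{
    const int n = 1 << 16;
    float *d_a, *d_b;
    cudaMalloc(&d_a, n * sizeof(float));
    cudaMalloc(&d_b, n * sizeof(float));
    cudaMemset(d_a, 0, n * sizeof(float));
    cudaMemcpy(d_b, d_a, n * sizeof(float), cudaMemcpyDeviceToDevice);

    float* input = d_a;
    float* output = d_b;
    for (int t = 0; t < 100; ++t) {
        relax<<<(n + 255) / 256, 256>>>(input, output, n);
        float* tmp = input; input = output; output = tmp;   // swap roles each step
    }
    cudaDeviceSynchronize();
    printf("final result lives in %s\n", input == d_a ? "d_a" : "d_b");
    cudaFree(d_a); cudaFree(d_b);
    return 0;
}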
935408553a900627d8e8a500573fac1e91de2042.cu
//jacobi7.cu #include <cuda.h> #include <cuda_runtime.h> #include <cuda_call.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include "getopt.h" #include "include/jacobi7_cuda.h" #include "include/jacobi7.h" //#ifndef TIME_TILE_SIZE //#warning TIME_TILE_SIZE is not set, defaulting to 1 //#define TIME_TILE_SIZE 2 //#endif // Timer function double rtclock(){ struct timeval tp; gettimeofday(&tp, NULL); return (tp.tv_sec + tp.tv_usec*1.0e-6); } int main(int argc, char* *argv){ if(argc != 8) { printf("USAGE: %s <NX> <NY> <NZ> <TX> <TY> <TZ> <TIME STEPS>\n", argv[0]); return 1; } // program parameters trans const int nx = atoi(argv[1]); const int ny = atoi(argv[2]); const int nz = atoi(argv[3]); const int tx = atoi(argv[4]); const int ty = atoi(argv[5]); const int tz = atoi(argv[6]); const int timesteps = atoi(argv[7]); const int xyz = nx * ny * nz; const int xyz_byetes = xyz * sizeof(float); float *h_dA; float *h_dB; float *d_dA; float *d_dB; float *h_dA1; float *h_dB1; // Allocate host buffers h_dA = (float*) malloc(xyz_byetes); h_dB = (float*) malloc(xyz_byetes); h_dA1 = (float*) malloc(xyz_byetes); h_dB1 = (float*) malloc(xyz_byetes); // grid data iniatialization // randomly generaed test data srand(time(NULL)); int i = 0; for(; i < xyz; i++) { h_dA[i] = 1 + (float)rand() / (float)RAND_MAX; h_dB[i] = h_dA[i]; h_dA1[i] = h_dA[i]; h_dB1[i] = h_dA[i]; } printf("Start computing..."); printf("h_dB[%d]:%f\n", 2+32*(3+32*4), h_dB[2+32*(3+32*4)]); printf("h_dA[%d]:%f\n", 2+32*(3+32*4), h_dA[2+32*(3+32*4)]); float *B = 0; const int ldb = 0; const int ldc = 0; // Always use device 0 cudaSetDevice(0); /* set the ratio of cache/shared memory cudaFuncCachePreferNone: Default function cache configuration, no preference cudaFuncCachePreferShared: Prefer larger shared memory and smaller L1 cache cudaFuncCachePreferL1: Prefer larger L1 cache and smaller shared memory */ CHECK_CALL(cudaDeviceSetCacheConfig(cudaFuncCachePreferShared)); // Allocate device buffers CHECK_CALL(cudaMalloc((void**)&d_dA, xyz_byetes)); CHECK_CALL(cudaMalloc((void**)&d_dB, xyz_byetes)); // Copy to device CHECK_CALL(cudaMemcpy(d_dA, h_dA, xyz_byetes, cudaMemcpyHostToDevice)); //CHECK_CALL(cudaMemcpy(d_dB, h_dB, xyz_byetes, cudaMemcpyHostToDevice)); CHECK_CALL(cudaMemcpy(d_dB, d_dA, xyz_byetes, cudaMemcpyDeviceToDevice)); // Setup the kernel float* input = d_dA; float* output = d_dB; dim3 grid(nx/tx, ny/ty, nz/tz); dim3 block(tx + 2*(TIME_TILE_SIZE-1), ty + 2*(TIME_TILE_SIZE-1), tz + 2*(TIME_TILE_SIZE-1)); float *tmp; float *tmp1; float fac = 6.0/(h_dA[0] * h_dA[0]); const int sharedMemSize = (block.x) * (block.y) * (block.z) * sizeof(float); double startTime = rtclock(); // Run the GPU kernel for(int t = 0; t < timesteps; t += TIME_TILE_SIZE) { jacobi3d_7p_overlap<<<grid, block, sharedMemSize>>>(input, output, nx, ny, nz, fac); // swap input and output tmp = input; input = output; output = tmp; } SYNC_DEVICE(); ASSERT_STATE("Kernel"); double endTime = rtclock(); double elapsedTimeG = endTime - startTime; printf("Elapsed Time:%lf\n", elapsedTimeG); double flops = xyz * 7.0 * timesteps; double gflops = flops / elapsedTimeG / 1e9; printf("(GPU) %lf GFlop/s\n", gflops); // Copy the result to main memory CHECK_CALL(cudaMemcpy(h_dB, input, xyz_byetes, cudaMemcpyDeviceToHost)); // Run the CPU version startTime = rtclock(); for(int t = 0; t < timesteps; t += 1) { jacobi7(nx, ny, nz, h_dA1, B, ldb, h_dB1, ldc); tmp1 = h_dA1; h_dA1 = h_dB1; h_dB1 = tmp1; } endTime = rtclock(); double elapsedTimeC = endTime - 
startTime; printf("Elapsed Time:%lf\n", elapsedTimeC); flops = xyz * 7.0 * timesteps; gflops = flops / elapsedTimeC / 1e9; printf("(CPU) %lf GFlop/s\n", gflops); // compare the results btw CPU and GPU version double errorNorm, refNorm, diff; errorNorm = 0.0; refNorm = 0.0; for (; i < xyz; ++i){ diff = h_dA1[i] - h_dB[i]; errorNorm += diff * diff; refNorm += h_dA1[i] * h_dA1[i]; /*if (h_dB[i+nx*(j+ny*k)] != h_dA1[i+nx*(j+ny*k)]) diff = 1;*/ } errorNorm = sqrt(errorNorm); refNorm = sqrt(refNorm); printf("Error Norm:%lf\n", errorNorm); printf("Ref Norm:%lf\n", refNorm); if(abs(refNorm) < 1e-7) { printf("Correctness, FAILED\n"); } else if((errorNorm / refNorm) > 1e-2) { printf("Correct ness, FAILED\n"); } else { printf("Correctness, PASSED\n"); } printf("h_dB[%d]:%f\n", 2+ny*(3+nz*4), h_dB[2+ny*(3+nz*4)]); printf("h_dA[%d]:%f\n", 2+ny*(3+nz*4), h_dA[2+ny*(3+nz*4)]); printf("h_dB1[%d]:%f\n", 2+ny*(3+nz*4), h_dB1[2+ny*(3+nz*4)]); printf("h_dA1[%d]:%f\n", 2+ny*(3+nz*4), h_dA1[2+ny*(3+nz*4)]); printf("-----------------------------------\n"); printf("h_dB[%d]:%f\n", 3+ny*(4+nz*5), h_dB[3+ny*(4+nz*5)]); printf("h_dA[%d]:%f\n", 3+ny*(4+nz*5), h_dA[3+ny*(4+nz*5)]); printf("h_dB1[%d]:%f\n", 3+ny*(4+nz*5), h_dB1[3+ny*(4+nz*5)]); printf("h_dA1[%d]:%f\n", 3+ny*(4+nz*5), h_dA1[3+ny*(4+nz*5)]); // Free buffers free(h_dA); free(h_dB); free(h_dA1); free(h_dB1); CHECK_CALL(cudaFree(d_dA)); CHECK_CALL(cudaFree(d_dB)); }
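One detail worth noting in both versions above: the comparison loop `for (; i < xyz; ++i)` reuses the counter `i`, which already equals `xyz` after the initialization loop, so as written the error and reference norms stay at zero and the check reports FAILED through the `refNorm < 1e-7` branch. The files are left unchanged here; a standalone relative-error check with a fresh counter might look like the sketch below (illustrative only).

// norm_check_sketch.cpp -- relative-error comparison between two buffers (illustrative).
#include <cstdio>
#include <cmath>

bool check_close(const float* ref, const float* test, int count, double tol)
{
    double errorNorm = 0.0, refNorm = 0.0;
    for (int i = 0; i < count; ++i) {          // fresh loop counter
        double diff = (double)ref[i] - (double)test[i];
        errorNorm += diff * diff;
        refNorm   += (double)ref[i] * (double)ref[i];
    }
    errorNorm = std::sqrt(errorNorm);
    refNorm   = std::sqrt(refNorm);
    if (refNorm < 1e-7) return false;          // reference is numerically all zeros
    return (errorNorm / refNorm) <= tol;
}

int main()
{
    float ref[4]  = {1.0f, 2.0f, 3.0f, 4.0f};
    float test[4] = {1.0f, 2.0f, 3.0f, 4.00001f};
    printf("PASSED: %d\n", (int)check_close(ref, test, 4, 1e-2));
    return 0;
}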
660dd06a7243297e0dcbdca6b4ed3bcc99a46e75.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "CrowdPressure.h" #include "TransferHelper.h" #include "DataBase.h" #include <cassert> #include "CudaHelper.h" #include <device_launch_parameters.h> void CrowdPressure::ToolSystem() { m_pressureArray = TransferHelper::ReserveFloatMemory(); } void CrowdPressure::FreeResources() { m_pressureArray.FreeArray(); } __global__ void ComputeCrowdPressureCuda(size_t strides, float* densityField, float* velocityField, unsigned int* wallData, float* result) { __shared__ float inputBuffer[gBlockSize + 2][gBlockSize + 2]; __shared__ unsigned int locallyBlockedField[gBlockSize + 2][gBlockSize + 2]; // We keep tack of the pixel we are responsible for. int xOrigin = threadIdx.x + gBlockSize * blockIdx.x + 1; int yOrigin = threadIdx.y + gBlockSize * blockIdx.y + 1; int xScan = threadIdx.x + 1; int yScan = threadIdx.y + 1; int totalIndex = xOrigin + yOrigin * strides; inputBuffer[xScan][yScan] = velocityField[totalIndex]; locallyBlockedField[xScan][yScan] = wallData[totalIndex]; if (threadIdx.x == 0) { inputBuffer[0][yScan] = velocityField[(xOrigin - 1) + yOrigin * strides]; locallyBlockedField[0][yScan] = wallData[(xOrigin - 1) + yOrigin * strides]; } if (threadIdx.x == 31) { inputBuffer[xScan + 1][yScan] = velocityField[(xOrigin + 1) + yOrigin * strides]; locallyBlockedField[xScan + 1][yScan] = wallData[(xOrigin + 1) + yOrigin * strides]; } if (threadIdx.y == 0) { inputBuffer[xScan][0] = velocityField[xOrigin + (yOrigin - 1)* strides]; locallyBlockedField[xScan][0] = wallData[xOrigin + (yOrigin - 1)* strides]; } if (threadIdx.y == 31) { inputBuffer[xScan][yScan + 1] = velocityField[xOrigin + (yOrigin + 1) * strides]; locallyBlockedField[xScan][yScan + 1] = wallData[xOrigin + (yOrigin + 1) * strides]; } __syncthreads(); bool leftValid = (locallyBlockedField[xScan - 1][yScan] != 0); bool rightValid = (locallyBlockedField[xScan + 1][yScan] != 0); float xGrad = 0.0f; if (leftValid && rightValid) { xGrad = (inputBuffer[xScan + 1][yScan] - inputBuffer[xScan - 1][yScan]) / (2.0f * gCellSize); } else if (rightValid) { xGrad = (inputBuffer[xScan][yScan] - inputBuffer[xScan - 1][yScan]) / (gCellSize); } else if (leftValid) { xGrad = (inputBuffer[xScan + 1][yScan] - inputBuffer[xScan][yScan]) / ( gCellSize); } bool topValid = (locallyBlockedField[xScan][yScan - 1] == 0); bool bottomValid = (locallyBlockedField[xScan][yScan + 1] == 0); float yGrad = 0.0f; if (topValid && bottomValid) { yGrad = (inputBuffer[xScan][yScan + 1] - inputBuffer[xScan][yScan - 1]) / (2.0f * gCellSize); } else if (topValid) { yGrad = (inputBuffer[xScan][yScan] - inputBuffer[xScan][yScan - 1]) / (gCellSize); } else if (bottomValid) { yGrad = (inputBuffer[xScan][yScan + 1] - inputBuffer[xScan][yScan]) / (gCellSize); } result[totalIndex] = densityField[totalIndex] * (xGrad * xGrad + yGrad * yGrad); } void CrowdPressure::ComputeCrowdPressure(FloatArray density, FloatArray velocity, DataBase* dataBase) { UnsignedArray wallData = dataBase->GetWallData(); assert(density.m_stride == velocity.m_stride); assert(density.m_stride == m_pressureArray.m_stride); assert(density.m_stride == wallData.m_stride); ComputeCrowdPressureCuda CUDA_DECORATOR_LOGIC (density.m_stride, density.m_array, velocity.m_array, wallData.m_array ,m_pressureArray.m_array); }
660dd06a7243297e0dcbdca6b4ed3bcc99a46e75.cu
#include "CrowdPressure.h" #include "TransferHelper.h" #include "DataBase.h" #include <cassert> #include "CudaHelper.h" #include <device_launch_parameters.h> void CrowdPressure::ToolSystem() { m_pressureArray = TransferHelper::ReserveFloatMemory(); } void CrowdPressure::FreeResources() { m_pressureArray.FreeArray(); } __global__ void ComputeCrowdPressureCuda(size_t strides, float* densityField, float* velocityField, unsigned int* wallData, float* result) { __shared__ float inputBuffer[gBlockSize + 2][gBlockSize + 2]; __shared__ unsigned int locallyBlockedField[gBlockSize + 2][gBlockSize + 2]; // We keep tack of the pixel we are responsible for. int xOrigin = threadIdx.x + gBlockSize * blockIdx.x + 1; int yOrigin = threadIdx.y + gBlockSize * blockIdx.y + 1; int xScan = threadIdx.x + 1; int yScan = threadIdx.y + 1; int totalIndex = xOrigin + yOrigin * strides; inputBuffer[xScan][yScan] = velocityField[totalIndex]; locallyBlockedField[xScan][yScan] = wallData[totalIndex]; if (threadIdx.x == 0) { inputBuffer[0][yScan] = velocityField[(xOrigin - 1) + yOrigin * strides]; locallyBlockedField[0][yScan] = wallData[(xOrigin - 1) + yOrigin * strides]; } if (threadIdx.x == 31) { inputBuffer[xScan + 1][yScan] = velocityField[(xOrigin + 1) + yOrigin * strides]; locallyBlockedField[xScan + 1][yScan] = wallData[(xOrigin + 1) + yOrigin * strides]; } if (threadIdx.y == 0) { inputBuffer[xScan][0] = velocityField[xOrigin + (yOrigin - 1)* strides]; locallyBlockedField[xScan][0] = wallData[xOrigin + (yOrigin - 1)* strides]; } if (threadIdx.y == 31) { inputBuffer[xScan][yScan + 1] = velocityField[xOrigin + (yOrigin + 1) * strides]; locallyBlockedField[xScan][yScan + 1] = wallData[xOrigin + (yOrigin + 1) * strides]; } __syncthreads(); bool leftValid = (locallyBlockedField[xScan - 1][yScan] != 0); bool rightValid = (locallyBlockedField[xScan + 1][yScan] != 0); float xGrad = 0.0f; if (leftValid && rightValid) { xGrad = (inputBuffer[xScan + 1][yScan] - inputBuffer[xScan - 1][yScan]) / (2.0f * gCellSize); } else if (rightValid) { xGrad = (inputBuffer[xScan][yScan] - inputBuffer[xScan - 1][yScan]) / (gCellSize); } else if (leftValid) { xGrad = (inputBuffer[xScan + 1][yScan] - inputBuffer[xScan][yScan]) / ( gCellSize); } bool topValid = (locallyBlockedField[xScan][yScan - 1] == 0); bool bottomValid = (locallyBlockedField[xScan][yScan + 1] == 0); float yGrad = 0.0f; if (topValid && bottomValid) { yGrad = (inputBuffer[xScan][yScan + 1] - inputBuffer[xScan][yScan - 1]) / (2.0f * gCellSize); } else if (topValid) { yGrad = (inputBuffer[xScan][yScan] - inputBuffer[xScan][yScan - 1]) / (gCellSize); } else if (bottomValid) { yGrad = (inputBuffer[xScan][yScan + 1] - inputBuffer[xScan][yScan]) / (gCellSize); } result[totalIndex] = densityField[totalIndex] * (xGrad * xGrad + yGrad * yGrad); } void CrowdPressure::ComputeCrowdPressure(FloatArray density, FloatArray velocity, DataBase* dataBase) { UnsignedArray wallData = dataBase->GetWallData(); assert(density.m_stride == velocity.m_stride); assert(density.m_stride == m_pressureArray.m_stride); assert(density.m_stride == wallData.m_stride); ComputeCrowdPressureCuda CUDA_DECORATOR_LOGIC (density.m_stride, density.m_array, velocity.m_array, wallData.m_array ,m_pressureArray.m_array); }
1ddd6b2c5b5cad448f66a24338b660852cedb1c3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/c_embedding_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return ::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbedding(T* out, const T* table, const IndexT* ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; PADDLE_ENFORCE(real_idx < N, "The index is out of bounds, " "please check whether the dimensions of index and " "input meet the requirements. It should " "be less than [%d], but received [%d]", N, real_idx); out[i] = table[real_idx * columns + col]; } else { out[i] = static_cast<T>(0); } } } template <typename T, typename Context> void CEmbeddingKernel(const Context& ctx, const DenseTensor& w, const DenseTensor& ids, int64_t start_index, DenseTensor* out) { size_t N = w.dims()[0]; size_t D = w.dims()[1]; size_t K = ids.numel(); const int64_t end_idx = start_index + N; auto* table = w.data<T>(); auto* output = ctx.template Alloc<T>(out); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const auto& index_type = ids.dtype(); if (index_type == phi::DataType::INT32) { hipLaunchKernelGGL(( CEmbedding<T, int32_t>) , dim3(blocks), dim3(threads), 0, ctx.stream(), output, table, ids.data<int32_t>(), K, D, N, start_index, end_idx, limit); } else if (index_type == phi::DataType::INT64) { hipLaunchKernelGGL(( CEmbedding<T, int64_t>) , dim3(blocks), dim3(threads), 0, ctx.stream(), output, table, ids.data<int64_t>(), K, D, N, start_index, end_idx, limit); } else { PADDLE_THROW(phi::errors::Unavailable( "GPU c_embedding ids only support int32 or int64.")); } } } // namespace phi #if NCCL_VERSION_CODE >= 21000 && TORCH_HIP_VERSION >= 11000 PD_REGISTER_KERNEL(c_embedding, GPU, ALL_LAYOUT, phi::CEmbeddingKernel, float, double, phi::dtype::bfloat16, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(c_embedding, GPU, ALL_LAYOUT, phi::CEmbeddingKernel, float, double, phi::dtype::float16) {} #endif
1ddd6b2c5b5cad448f66a24338b660852cedb1c3.cu
// Copyright (c) 2023 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/c_embedding_kernel.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { static constexpr int kNumCUDAThreads = 512; static constexpr int kNumMaxinumNumBlocks = 4096; static inline int NumBlocks(const int N) { return std::min((N + kNumCUDAThreads - 1) / kNumCUDAThreads, kNumMaxinumNumBlocks); } template <typename T, typename IndexT> __global__ void CEmbedding(T* out, const T* table, const IndexT* ids, const int rows, const int columns, const int64_t N, const int64_t start_idx, const int64_t end_idx, const int64_t limit) { CUDA_KERNEL_LOOP(i, limit) { size_t row = i / columns; size_t col = i % columns; auto id = ids[row]; if (id >= start_idx && id < end_idx) { auto real_idx = id - start_idx; PADDLE_ENFORCE(real_idx < N, "The index is out of bounds, " "please check whether the dimensions of index and " "input meet the requirements. It should " "be less than [%d], but received [%d]", N, real_idx); out[i] = table[real_idx * columns + col]; } else { out[i] = static_cast<T>(0); } } } template <typename T, typename Context> void CEmbeddingKernel(const Context& ctx, const DenseTensor& w, const DenseTensor& ids, int64_t start_index, DenseTensor* out) { size_t N = w.dims()[0]; size_t D = w.dims()[1]; size_t K = ids.numel(); const int64_t end_idx = start_index + N; auto* table = w.data<T>(); auto* output = ctx.template Alloc<T>(out); auto limit = K * D; int blocks = NumBlocks(limit); int threads = kNumCUDAThreads; const auto& index_type = ids.dtype(); if (index_type == phi::DataType::INT32) { CEmbedding<T, int32_t> <<<blocks, threads, 0, ctx.stream()>>>(output, table, ids.data<int32_t>(), K, D, N, start_index, end_idx, limit); } else if (index_type == phi::DataType::INT64) { CEmbedding<T, int64_t> <<<blocks, threads, 0, ctx.stream()>>>(output, table, ids.data<int64_t>(), K, D, N, start_index, end_idx, limit); } else { PADDLE_THROW(phi::errors::Unavailable( "GPU c_embedding ids only support int32 or int64.")); } } } // namespace phi #if NCCL_VERSION_CODE >= 21000 && CUDA_VERSION >= 11000 PD_REGISTER_KERNEL(c_embedding, GPU, ALL_LAYOUT, phi::CEmbeddingKernel, float, double, phi::dtype::bfloat16, phi::dtype::float16) {} #else PD_REGISTER_KERNEL(c_embedding, GPU, ALL_LAYOUT, phi::CEmbeddingKernel, float, double, phi::dtype::float16) {} #endif
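The pair above isolates the handful of mechanical rewrites hipify makes to this kernel: the runtime header swap, std::min spelled as ::min, the CUDA_VERSION guard renamed to TORCH_HIP_VERSION, and the launch itself, where the CUDA triple-chevron configuration becomes a hipLaunchKernelGGL call carrying the same grid, block, shared-memory and stream arguments before the kernel arguments. A minimal sketch of that launch correspondence follows; copyKernel, dst, src and n are hypothetical names, not taken from the files above.

// CUDA launch, as written in the .cu files of this collection:
//   copyKernel<<<blocks, threads, 0, stream>>>(dst, src, n);
// Equivalent HIP launch emitted by hipify, as in the .hip files:
//   hipLaunchKernelGGL(copyKernel, dim3(blocks), dim3(threads), 0, stream, dst, src, n);
__global__ void copyKernel(float* dst, const float* src, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // one element per thread
    if (i < n)
        dst[i] = src[i];
}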
440dc916c715716842575ae1d492c3f61c4c2972.hip
// !!! This is a file automatically generated by hipify!!! /************************************************************************** * * set up GPU for processing * **************************************************************************/ #include <hip/hip_runtime.h> #include <stdio.h> #include "gpu_main.h" #include <hip/hip_runtime.h> #define gScalar 0.2 texture<float, 2, hipReadModeElementType> texGreen; /******************************************************************************/ GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight) { GPU_Palette X; X.gThreads.x = 32; // 32 x 32 = 1024 threads per block X.gThreads.y = 32; X.gThreads.z = 1; X.gBlocks.x = ceil(imageWidth/32); // however many blocks needed for image X.gBlocks.y = ceil(imageHeight/32); X.gBlocks.z = 1; X.palette_width = imageWidth; // save this info X.palette_height = imageHeight; X.num_pixels = imageWidth * imageHeight; // allocate memory on GPU corresponding to pixel colors: hipError_t err; err = hipMalloc((void**) &X.red, X.num_pixels * sizeof(float)); if(err != hipSuccess){ printf("cuda error allocating red = %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMalloc((void**) &X.green, X.num_pixels * sizeof(float)); // g if(err != hipSuccess){ printf("cuda error allocating green = %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipMalloc((void**) &X.blue, X.num_pixels * sizeof(float)); // b if(err != hipSuccess){ printf("cuda error allocating blue = %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } float *devPtr; size_t size=64*sizeof(float); hipMalloc((void **) &devPtr, size); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); err = hipBindTexture(NULL, &texGreen, devPtr, &channelDesc, size); if (err != hipSuccess) { printf("cuda error bind texture = %s\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } return X; } /******************************************************************************/ void freeGPUPalette(GPU_Palette* P) { hipFree(P->red); hipFree(P->green); hipFree(P->blue); } /******************************************************************************/ int updatePalette(GPU_Palette* P, int xIdx, int yIdx, float z) { hipLaunchKernelGGL(( updateReds) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->red, xIdx, yIdx, z); hipLaunchKernelGGL(( updateGreens) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->green, xIdx, yIdx, z); hipLaunchKernelGGL(( updateBlues) , dim3(P->gBlocks), dim3(P->gThreads) , 0, 0, P->blue, xIdx, yIdx, z); return 0; } /******************************************************************************/ __global__ void updateReds(float* red, int xIdx, int yIdx, float z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { red[vecIdx] = 1.0; } else { red[vecIdx] *= .98; } } /******************************************************************************/ __global__ void updateGreens(float* green, int xIdx, int yIdx, float z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { green[vecIdx] = 0.5; } else { float acc = 0.0; for (int i = -5;i <= 5;i++) { for (int j = -5;j <= 5;j++) { acc += tex2D(texGreen, x + i, y + j); } } acc /= 121.0; green[vecIdx] = 
acc; } } /******************************************************************************/ __global__ void updateBlues(float* blue, int xIdx, int yIdx, float z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { blue[vecIdx] = 0.6; } else { blue[vecIdx] *= .93; } }
440dc916c715716842575ae1d492c3f61c4c2972.cu
/************************************************************************** * * set up GPU for processing * **************************************************************************/ #include <cuda.h> #include <stdio.h> #include "gpu_main.h" #include <cuda_runtime.h> #define gScalar 0.2 texture<float, 2, cudaReadModeElementType> texGreen; /******************************************************************************/ GPU_Palette initGPUPalette(unsigned int imageWidth, unsigned int imageHeight) { GPU_Palette X; X.gThreads.x = 32; // 32 x 32 = 1024 threads per block X.gThreads.y = 32; X.gThreads.z = 1; X.gBlocks.x = ceil(imageWidth/32); // however many blocks needed for image X.gBlocks.y = ceil(imageHeight/32); X.gBlocks.z = 1; X.palette_width = imageWidth; // save this info X.palette_height = imageHeight; X.num_pixels = imageWidth * imageHeight; // allocate memory on GPU corresponding to pixel colors: cudaError_t err; err = cudaMalloc((void**) &X.red, X.num_pixels * sizeof(float)); if(err != cudaSuccess){ printf("cuda error allocating red = %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMalloc((void**) &X.green, X.num_pixels * sizeof(float)); // g if(err != cudaSuccess){ printf("cuda error allocating green = %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } cudaMalloc((void**) &X.blue, X.num_pixels * sizeof(float)); // b if(err != cudaSuccess){ printf("cuda error allocating blue = %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } float *devPtr; size_t size=64*sizeof(float); cudaMalloc((void **) &devPtr, size); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); err = cudaBindTexture(NULL, &texGreen, devPtr, &channelDesc, size); if (err != cudaSuccess) { printf("cuda error bind texture = %s\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } return X; } /******************************************************************************/ void freeGPUPalette(GPU_Palette* P) { cudaFree(P->red); cudaFree(P->green); cudaFree(P->blue); } /******************************************************************************/ int updatePalette(GPU_Palette* P, int xIdx, int yIdx, float z) { updateReds <<< P->gBlocks, P->gThreads >>> (P->red, xIdx, yIdx, z); updateGreens <<< P->gBlocks, P->gThreads >>> (P->green, xIdx, yIdx, z); updateBlues <<< P->gBlocks, P->gThreads >>> (P->blue, xIdx, yIdx, z); return 0; } /******************************************************************************/ __global__ void updateReds(float* red, int xIdx, int yIdx, float z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { red[vecIdx] = 1.0; } else { red[vecIdx] *= .98; } } /******************************************************************************/ __global__ void updateGreens(float* green, int xIdx, int yIdx, float z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { green[vecIdx] = 0.5; } else { float acc = 0.0; for (int i = -5;i <= 5;i++) { for (int j = -5;j <= 5;j++) { acc += tex2D(texGreen, x + i, y + j); } } acc /= 121.0; green[vecIdx] = acc; } } /******************************************************************************/ __global__ void updateBlues(float* blue, int xIdx, int yIdx, float 
z){ int size = 5; int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int vecIdx = x + (y * blockDim.x * gridDim.x); if ((xIdx < x + size) && (xIdx > x - size) && (yIdx < y + size) && (yIdx > y - size)) { blue[vecIdx] = 0.6; } else { blue[vecIdx] *= .93; } }
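Beyond the launch syntax, this pair shows the one-to-one renaming of the runtime API: cudaMalloc, cudaFree, cudaError_t, cudaSuccess and cudaGetErrorString become hipMalloc, hipFree, hipError_t, hipSuccess and hipGetErrorString with unchanged argument lists. Below is a minimal allocate-check-free sketch in the HIP spelling, mirroring the error-handling pattern of initGPUPalette above; the buffer name and size are hypothetical.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

int main()
{
    float* d_buf = nullptr;                                   // hypothetical device buffer
    hipError_t err = hipMalloc((void**)&d_buf, 1024 * sizeof(float));
    if (err != hipSuccess) {                                  // same check pattern as in initGPUPalette
        printf("hipMalloc failed: %s\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    hipFree(d_buf);
    return 0;
}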
4f48bfe3bf5edfb34b53e233f77396438698705e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_front; int xdim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_front; int ydim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_front; int xdim1_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_front; int ydim1_update_halo_kernel4_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_front*(y)+xdim0_update_halo_kernel4_plus_2_front*ydim0_update_halo_kernel4_plus_2_front*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_front*(y)+xdim1_update_halo_kernel4_plus_2_front*ydim1_update_halo_kernel4_plus_2_front*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_front_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,-2)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,-2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_front + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_front * ydim0_update_halo_kernel4_plus_2_front; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_front + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_front * ydim1_update_halo_kernel4_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_front_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,83)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(83,"update_halo_kernel4_plus_2_front"); OPS_kernels[83].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; 
} #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_front_h || ydim0 != ydim0_update_halo_kernel4_plus_2_front_h || xdim1 != xdim1_update_halo_kernel4_plus_2_front_h || ydim1 != ydim1_update_halo_kernel4_plus_2_front_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_front, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_front_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_front, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_front_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_front, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_front_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_front, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[83].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_update_halo_kernel4_plus_2_front), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[83].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[83].mpi_time += t2-t1; OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg 
arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 83; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 83; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_front_execute; if (OPS_diags > 1) { ops_timing_realloc(83,"update_halo_kernel4_plus_2_front"); } ops_enqueue_kernel(desc); } #endif
4f48bfe3bf5edfb34b53e233f77396438698705e.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel4_plus_2_front; int xdim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim0_update_halo_kernel4_plus_2_front; int ydim0_update_halo_kernel4_plus_2_front_h = -1; __constant__ int xdim1_update_halo_kernel4_plus_2_front; int xdim1_update_halo_kernel4_plus_2_front_h = -1; __constant__ int ydim1_update_halo_kernel4_plus_2_front; int ydim1_update_halo_kernel4_plus_2_front_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel4_plus_2_front*(y)+xdim0_update_halo_kernel4_plus_2_front*ydim0_update_halo_kernel4_plus_2_front*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel4_plus_2_front*(y)+xdim1_update_halo_kernel4_plus_2_front*ydim1_update_halo_kernel4_plus_2_front*(z)) //user function __device__ inline void update_halo_kernel4_plus_2_front_gpu(double *vol_flux_y, double *mass_flux_y, const int* fields) { if(fields[FIELD_VOL_FLUX_Y] == 1) vol_flux_y[OPS_ACC0(0,0,0)] = vol_flux_y[OPS_ACC0(0,0,-2)]; if(fields[FIELD_MASS_FLUX_Y] == 1) mass_flux_y[OPS_ACC1(0,0,0)] = mass_flux_y[OPS_ACC1(0,0,-2)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel4_plus_2_front( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel4_plus_2_front + idx_z * 1*1 * xdim0_update_halo_kernel4_plus_2_front * ydim0_update_halo_kernel4_plus_2_front; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel4_plus_2_front + idx_z * 1*1 * xdim1_update_halo_kernel4_plus_2_front * ydim1_update_halo_kernel4_plus_2_front; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel4_plus_2_front_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel4_plus_2_front_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif //Timing double t1,t2,c1,c2; ops_arg args[3] = { arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,3,range,83)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(83,"update_halo_kernel4_plus_2_front"); OPS_kernels[83].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int 
z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel4_plus_2_front_h || ydim0 != ydim0_update_halo_kernel4_plus_2_front_h || xdim1 != xdim1_update_halo_kernel4_plus_2_front_h || ydim1 != ydim1_update_halo_kernel4_plus_2_front_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel4_plus_2_front, &xdim0, sizeof(int) ); xdim0_update_halo_kernel4_plus_2_front_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel4_plus_2_front, &ydim0, sizeof(int) ); ydim0_update_halo_kernel4_plus_2_front_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel4_plus_2_front, &xdim1, sizeof(int) ); xdim1_update_halo_kernel4_plus_2_front_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel4_plus_2_front, &ydim1, sizeof(int) ); ydim1_update_halo_kernel4_plus_2_front_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[83].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_update_halo_kernel4_plus_2_front<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[83].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[83].mpi_time += t2-t1; OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[83].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel4_plus_2_front(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); 
desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 83; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 83; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg*)malloc(3*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int)); memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel4_plus_2_front_execute; if (OPS_diags > 1) { ops_timing_realloc(83,"update_halo_kernel4_plus_2_front"); } ops_enqueue_kernel(desc); } #endif
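A second pattern worth noting in this pair is how the per-kernel __constant__ dimensions are handled: the declarations keep the __constant__ qualifier, cudaMemcpyToSymbol becomes hipMemcpyToSymbol with the same argument order (following the call form used in the .hip file above), and a host-side shadow variable avoids re-uploading an unchanged value. A minimal sketch of that caching idiom; the symbol and function names here are hypothetical.

__constant__ int xdim_stencil;      // device-side constant read by a hypothetical kernel
int xdim_stencil_h = -1;            // host-side shadow of the last value uploaded

void set_stencil_xdim(int xdim)
{
    if (xdim != xdim_stencil_h) {   // upload only when the value actually changed
        hipMemcpyToSymbol(xdim_stencil, &xdim, sizeof(int));
        xdim_stencil_h = xdim;
    }
}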
c0e0fb6c7a74e645e5f211eb3bcf31730a1d5653.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "syn.cuh" #include <time.h> #include <math.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> // Debug __global__ static void printf_GPU ( const int *comp, const double *elem, const int n_syn, const int n_comp, const int n_elem ) { for ( int i = 0; i < n_syn; i++ ) { //printf ( "Pre = %d, PostNum = %d, PostComp = %d\n", comp [ i ] / 578, comp [ n_syn + i ] % 1600, comp [ n_syn + i ] / 1600 ); printf ( "Pre = %d, Post = %d\n", comp [ i ], comp [ n_syn + i ] ); for ( int j = 0; j < n_elem; j++ ) { printf ( "%f, ", elem [ j * n_syn + i ] ); } printf ("\n"); } } __host__ __device__ static void reset_array ( int * array, int num ) { for ( int i = 0; i < num; i++ ) { array [ i ] = -1; } } __host__ __device__ static void reset_zero ( int * array, int num ) { for ( int i = 0; i < num; i++ ) { array [ i ] = 0; } } __global__ void setCurand ( unsigned long seed, hiprandState_t *state, const int num ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num ) { hiprand_init ( seed, id, 0, & ( state [ id ] ) ); } } //////////////////////////////// MFGR ////////////////////////////// __global__ static void mfgr_initialize ( int *d_comp, double *d_elem, const int num_mfgr, const int n_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_gr ) { for( int j = 0; j < 4; j++ ) { d_comp [ pre_comp * num_mfgr + id * 4 + j ] = id; // not use d_comp [ post_comp * num_mfgr + id * 4 + j ] = j + id * GR_COMP; d_elem [ mfgr_ampa * num_mfgr + id * 4 + j ] = 0.0; d_elem [ mfgr_nmda * num_mfgr + id * 4 + j ] = 0.0; d_elem [ mfgr_weight * num_mfgr + id * 4 + j ] = W_MFGR / 4.0; d_elem [ mfgr_val * num_mfgr + id * 4 + j ] = 0.0; } } } __host__ synapse_t *mfgr_create ( const int n_gr ) { int num_mfgr = n_gr * 4; synapse_t *d_mfgr = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); d_mfgr -> n = num_mfgr; if ( num_mfgr == 0 ) { printf ( "# of mfgr = 0\n" ); return d_mfgr; } else { printf ( "# of mfgr = %d\n", num_mfgr ); } hipMalloc ( ( int ** ) & ( d_mfgr -> comp ), syn_n_comp * num_mfgr * sizeof ( int ) ); hipMalloc ( ( double ** ) & ( d_mfgr -> elem ), mfgr_n_elem * num_mfgr * sizeof ( double ) ); d_mfgr -> f_out = fopen ( "MF_RASTER.csv", "w" ); hipLaunchKernelGGL(( mfgr_initialize) , dim3(( ( n_gr ) + 127 ) / 128), dim3(128) , 0, 0, d_mfgr -> comp, d_mfgr -> elem, num_mfgr, n_gr ); // Set rand hipMalloc ( ( void ** ) &( d_mfgr -> cstate ), num_mfgr * sizeof ( hiprandState_t ) ); hipLaunchKernelGGL(( setCurand) , dim3(( num_mfgr + 127 ) / 128), dim3(128) , 0, 0, rand (), d_mfgr -> cstate, num_mfgr ); return d_mfgr; } __global__ void mfgr_update ( int *mfgr_comp, double *mfgr_elem, const double t, const int num_mfgr, hiprandState_t *S ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_mfgr ) { int firing_flag; double fr_mf = 5.0; //( 0 <= t && t < 250 )? fr_mf = 60.0 : fr_mf = 5.0; ( S_Stimuli <= t && t < E_Stimuli )? fr_mf = 60.0 : fr_mf = 5.0; double f = hiprand_uniform ( & ( S [ id ] ) ); ( fr_mf * RKC_DT * 0.001 > f )? 
firing_flag = 1 : firing_flag = 0; // Decay :exp( - DT / tau ) = 0.90107510572 ( DT = 0.125, tau = 1.2 )// gmax = 24000e-9, 0.81193634615 (DT = 0.025) mfgr_elem [ mfgr_ampa * num_mfgr + id ] = mfgr_elem [ mfgr_ampa * num_mfgr + id ] * D_MFGR_AMPA + firing_flag; // Decay :exp( - DT / tau ) = 0.99759904077 ( DT = 0.125, tau = 52 ) // gmax = 32000e-9, 0.99520384614 (DT = 0.025) mfgr_elem [ mfgr_nmda * num_mfgr + id ] = mfgr_elem [ mfgr_ampa * num_mfgr + id ] * D_MFGR_NMDA + firing_flag; mfgr_elem [ mfgr_val * num_mfgr + id ] = mfgr_elem [ mfgr_weight * num_mfgr + id ] * ( G_MFGR_AMPA * mfgr_elem [ mfgr_ampa * num_mfgr + id ] + G_MFGR_NMDA * mfgr_elem [ mfgr_nmda * num_mfgr + id ] ); // 0.88 : 0.12 //int l_comp = mfgr_comp [ post_comp * num_mfgr + id ]; // d_gr -> elem [ g_syn ] [ l_comp ] = mfgr_elem [ mfgr_val * num_mfgr + id ]; // += } } __host__ void mf_output_file ( synapse_t *d_mfgr, const double t, neuron_t *p_gr ) { FILE *f = d_mfgr -> f_out; double *ret = ( double * ) malloc ( sizeof ( double ) * mfgr_n_elem * d_mfgr -> n ); hipMemcpy ( ret, d_mfgr -> elem, mfgr_n_elem * d_mfgr -> n * sizeof ( double ), hipMemcpyDeviceToHost ); double val = 0.0; fprintf ( f, "%lf,", t ); for ( int j = 0; j < d_mfgr -> n; j++ ) { val = G_MFGR_AMPA * 0.88 * ret [ mfgr_ampa * d_mfgr -> n + j ] + G_MFGR_NMDA * 0.12 * ret [ mfgr_nmda * d_mfgr -> n + j ]; val *= ret [ mfgr_weight * d_mfgr -> n + j ] *1000000; fprintf ( f, "%lf,", val ); } fprintf ( f, "\n" ); free ( ret ); } __host__ void mfgr_finalize ( synapse_t *d_mfgr , const int n_gr ) { if ( n_gr > 0 ) { hipFree ( d_mfgr -> comp ); hipFree ( d_mfgr -> elem ); hipFree ( d_mfgr -> cstate ); fclose ( d_mfgr -> f_out ); } free ( d_mfgr ); } //////////////////////////////// GRGO ////////////////////////////// __global__ static void grgo_initialize ( int *d_comp, double *d_elem, const int n_gr, const int nx_gr, const int ny_gr, const int nx_go, const int ny_go, const int num_grgo, const int *d_label_gogr, const int *d_num_syn_gr, const int *d_num_syn_go ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_gr ) { int l_num_syn = d_num_syn_gr [ id ]; // # of synapses from id-th GrC int s_syn_id = 0; // # of synapses from GrCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_gr [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_gr = id; int l_n_go = d_label_gogr [ l_n_gr * ny_go + i - s_syn_id ]; int iy_gr = l_n_gr / nx_gr; int iy_go = l_n_go / nx_go; float diff_grgo = 8.0 + iy_go * 32.0 - iy_gr * 16.0; // Distance between GrC and GoC somas int gr_ax_comp = 250 + 77 - ( int ) ( diff_grgo / 10.0 ); // Debug if ( l_n_go < 0 || fabs ( diff_grgo ) > 220.0 || gr_ax_comp > 577 ) { printf ( "error in grgo_initialize\n" ); } d_comp [ pre_comp * num_grgo + i ] = gr_ax_comp + GR_COMP * l_n_gr; d_comp [ post_comp * num_grgo + i ] = 0 + GO_COMP * l_n_go; // soma d_elem [ grgo_ampa * num_grgo + i ] = 0.0; d_elem [ grgo_nmda1 * num_grgo + i ] = 0.0; d_elem [ grgo_nmda2 * num_grgo + i ] = 0.0; d_elem [ grgo_weight * num_grgo + i ] = W_GRGO / ( d_num_syn_go [ l_n_go ] * 1.0/4.0 );//W_GRGO / ( l_num_syn * 1.0 ); d_elem [ grgo_val * num_grgo + i ] = 0.0; d_elem [ grgo_old_v * num_grgo + i ] = 1000.0; } } } __host__ synapse_t *grgo_create ( const int nx_gr, const int ny_gr, const int nx_go, const int ny_go ) { int n_gr = nx_gr * ny_gr; int n_go = nx_go * ny_go; int max_n_grgo = ny_go * n_gr; synapse_t *d_grgo = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_go * n_gr == 0 ) { d_grgo -> n = n_go * n_gr; printf ( "# of 
grgo = 0\n" ); return d_grgo; } int *label_grgo = ( int * ) malloc ( max_n_grgo * sizeof ( int ) ); // GoC labels are connected by each GrC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GrC int *num_syn_go = ( int * ) malloc ( n_go * sizeof ( int ) ); // # of synapses from each GoC reset_array ( label_grgo, max_n_grgo ); reset_zero ( num_syn_gr, n_gr ); reset_zero ( num_syn_go, n_go ); int num_grgo = 0; for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate int l_count = 0; for ( int i_go = 0; i_go < n_go; i_go++ ) { double lx_go = ( int ) ( i_go % nx_go ) * 32.0 + 8.0; // i_go's x-coordinate double ly_go = ( int ) ( i_go / nx_go ) * 32.0 + 8.0; // i_go's y-coordinate if ( abs ( lx_go - lx_gr ) < 16.0 && abs ( ly_go - ly_gr ) < 220.0 ) { label_grgo [ i_gr * ny_go + l_count ] = i_go; l_count++; num_syn_go [ i_go ]++; } } num_syn_gr [ i_gr ] = l_count; num_grgo += l_count; // Debug if ( l_count > ny_go ) { printf ( "Error in grgo_create\n" ); exit ( 1 ); } } d_grgo -> n = num_grgo; hipMalloc ( ( int ** ) & ( d_grgo -> comp ), syn_n_comp * num_grgo * sizeof ( int ) ); hipMalloc ( ( double ** ) & ( d_grgo -> elem ), grgo_n_elem * num_grgo * sizeof ( double ) ); printf ( "# of grgo = %d\n", d_grgo -> n ); // Copy host array to device array int *d_label_grgo; int *d_num_syn_gr; int *d_num_syn_go; hipMalloc ( ( int ** ) & ( d_label_grgo ), max_n_grgo * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( d_num_syn_go ), n_go * sizeof ( int ) ); hipMemcpy ( d_label_grgo, label_grgo, max_n_grgo * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_go, num_syn_go, n_go * sizeof ( int ), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( grgo_initialize) , dim3(( ( n_gr ) + 127 ) / 128), dim3(128) , 0, 0, d_grgo -> comp, d_grgo -> elem, n_gr, nx_gr, ny_gr, nx_go, ny_go, num_grgo, d_label_grgo, d_num_syn_gr, d_num_syn_go ); // Debug //printf ("\nDebug for grgo"); //printf_GPU <<< 1, 1 >>> ( d_grgo -> comp, d_grgo -> elem, num_grgo, syn_n_comp, grgo_n_elem ); //hipDeviceSynchronize(); free ( label_grgo ); free ( num_syn_gr ); free ( num_syn_go ); hipFree ( d_label_grgo ); hipFree ( d_num_syn_gr ); hipFree ( d_num_syn_go ); return d_grgo; } __global__ static void grgo_update ( int *grgo_comp, double *grgo_elem, const int num_grgo, neuron_t *d_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_grgo ) { int firing_flag = 0; int pre_num = grgo_comp [ pre_comp * num_grgo + id ]; double pre_comp_v = d_gr -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && grgo_elem [ grgo_old_v * num_grgo + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.92004441462 //tau = 1.5 = exp(-0.25 / tau ) = 0.84648172489 grgo_elem [ grgo_ampa * num_grgo + id ] = grgo_elem [ grgo_ampa * num_grgo + id ] * D_GRGO_AMPA + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99597586057 //tau = 31 = exp(-0.25 / tau ) = 0.99196791484 grgo_elem [ grgo_nmda1 * num_grgo + id ] = grgo_elem [ grgo_nmda1 * num_grgo + id ] * D_GRGO_NMDA1 + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99926497614 //tau = 170 = exp(-0.25 / tau ) = 0.99853049255 grgo_elem [ grgo_nmda2 * num_grgo + id ] = grgo_elem [ grgo_nmda2 * num_grgo + id ] * D_GRGO_NMDA2 + firing_flag; grgo_elem [ grgo_val * num_grgo + id ] 
= grgo_elem [ grgo_weight * num_grgo + id ] * ( G_GRGO_AMPA * grgo_elem [ grgo_ampa * num_grgo + id ] + G_GRGO_NMDA * ( 0.33 * grgo_elem [ grgo_nmda1 * num_grgo + id ] + 0.67 * grgo_elem [ grgo_nmda2 * num_grgo + id ] ) ); grgo_elem [ grgo_old_v * num_grgo + id ] = pre_comp_v; } } __host__ void grgo_finalize ( synapse_t *d_grgo, const int n_grgo ) { if ( n_grgo > 0 ) { hipFree ( d_grgo -> comp ); hipFree ( d_grgo -> elem ); //fclose ( d_grgo -> f_out ); } free ( d_grgo ); } //////////////////////////////// GOGR ////////////////////////////// __global__ static void gogr_initialize3 ( int *d_comp, double *d_elem, const int n_go, const int n_gr, const int num_gogr, const int *d_label_gogr, const int *d_num_syn_go, const int *d_num_syn_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_go ) { int l_num_syn = d_num_syn_go [ id ]; // # of synapses from id-th GoC int s_syn_id = 0; // # of synapses from GoCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_go [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_go = id; int l_n_gr = d_label_gogr [ l_n_go * 4 * 4 + i - s_syn_id ]; // Debug if ( l_n_gr < 0 ) { printf ( "error in gogr_initialize\n" ); } d_comp [ pre_comp * num_gogr + i ] = ( GO_COMP_DEND * 3 ) + GO_COMP * l_n_go; d_comp [ post_comp * num_gogr + i ] = 16 + GR_COMP * l_n_gr; // soma d_elem [ gogr_gabaa * num_gogr + i ] = 0.0; d_elem [ gogr_gabab * num_gogr + i ] = 0.0; d_elem [ gogr_weight * num_gogr + i ] = W_GOGR / ( d_num_syn_gr [ l_n_gr ] * 4.0 ); d_elem [ gogr_val * num_gogr + i ] = 0.0; d_elem [ gogr_old_v * num_gogr + i ] = 1000.0; } } } __host__ synapse_t *gogr_create ( const int nx_go, const int ny_go, const int nx_gr, const int ny_gr ) { int n_go = nx_go * ny_go; int n_gr = nx_gr * ny_gr; int max_n_gogr = n_go * 4 * 4; synapse_t *d_gogr = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_go * n_gr == 0 ) { d_gogr -> n = n_go * n_gr; printf ( "# of gogr = 0\n" ); return d_gogr; } int *label_gogr = ( int * ) malloc ( max_n_gogr * sizeof ( int ) ); // GrC labels are connected by each GoC int *num_syn_go = ( int * ) malloc ( n_go * sizeof ( int ) ); // # of synapses from each GoC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GoC reset_array ( label_gogr, max_n_gogr ); reset_zero ( num_syn_go, n_go ); reset_zero ( num_syn_gr, n_gr ); int num_gogr = 0; for ( int i_go = 0; i_go < n_go; i_go++ ) { int l_count = 0; double lx_go = ( int ) ( i_go % nx_go ) * 32.0 + 8.0; // i_go's x-coordinate double ly_go = ( int ) ( i_go / nx_go ) * 32.0 + 8.0; // i_go's y-coordinate for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate if ( fabs ( lx_go - lx_gr ) < 32.0 && fabs ( ly_go - ly_gr ) < 32.0 ) { label_gogr [ i_go * 4 * 4 + l_count ] = i_gr; l_count++; num_syn_gr [ i_gr ]++; } } num_syn_go [ i_go ] = l_count; num_gogr += l_count; // Debug if ( l_count > 4 * n_gr ) { printf ( "Error in gogr_create\n" ); exit ( 1 ); } } d_gogr -> n = num_gogr; hipMalloc ( ( int ** ) & ( d_gogr -> comp ), syn_n_comp * num_gogr * sizeof ( int ) ); hipMalloc ( ( double ** ) & ( d_gogr -> elem ), gogr_n_elem * num_gogr * sizeof ( double ) ); printf ( "# of gogr = %d\n", d_gogr -> n ); // Copy host array to device array int *d_label_gogr; int *d_num_syn_go, *d_num_syn_gr; hipMalloc ( ( int ** ) & ( d_label_gogr ), max_n_gogr * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( 
d_num_syn_go ), n_go * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); hipMemcpy ( d_label_gogr, label_gogr, max_n_gogr * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_go, num_syn_go, n_go * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( gogr_initialize3) , dim3(( ( n_go ) + 127 ) / 128), dim3(128) , 0, 0, d_gogr -> comp, d_gogr -> elem, n_go, n_gr, num_gogr, d_label_gogr, d_num_syn_go, d_num_syn_gr ); free ( label_gogr ); free ( num_syn_go ); free ( num_syn_gr ); hipFree ( d_label_gogr ); hipFree ( d_num_syn_go ); hipFree ( d_num_syn_gr ); // Debug //printf ("\nDebug for gogr"); //printf_GPU <<< 1, 1 >>> ( d_gogr -> comp, d_gogr -> elem, num_gogr, syn_n_comp, gogr_n_elem ); //hipDeviceSynchronize(); return d_gogr; } __global__ static void gogr_update ( int *gogr_comp, double *gogr_elem, const int num_gogr, neuron_t *d_go ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_gogr ) { int firing_flag = 0; int pre_num = gogr_comp [ pre_comp * num_gogr + id ]; double pre_comp_v = d_go -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && gogr_elem [ gogr_old_v * num_gogr + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.9823013511 //tau = 7 = exp(-0.25 / tau ) = 0.96491594437 gogr_elem [ gogr_gabaa * num_gogr + id ] = gogr_elem [ gogr_gabaa * num_gogr + id ] * D_GOGR_GABAA + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99788359867 //tau = 59 = exp(-0.25 / tau ) = 0.9957716765 gogr_elem [ gogr_gabab * num_gogr + id ] = gogr_elem [ gogr_gabab * num_gogr + id ] * D_GOGR_GABAB + firing_flag; gogr_elem [ gogr_val * num_gogr + id ] = gogr_elem [ gogr_weight * num_gogr + id ] * G_GOGR_GABA * ( 0.43 * gogr_elem [ gogr_gabaa * num_gogr + id ] + 0.57 * gogr_elem [ gogr_gabab * num_gogr + id ] ); gogr_elem [ gogr_old_v * num_gogr + id ] = pre_comp_v; //int l_comp = gogr_comp [ post_comp * num_gogr + id ]; //d_gr -> elem [ g_syn ] [ l_comp ] += l_val; // += } } __host__ void gogr_finalize ( synapse_t *d_gogr, const int n_gogr ) { if ( n_gogr > 0 ) { hipFree ( d_gogr -> comp ); hipFree ( d_gogr -> elem ); //hipFree ( d_gogr -> cstate ); //fclose ( d_gogr -> f_out ); } free ( d_gogr ); } //////////////////////////////// GRPKJ ////////////////////////////// __global__ static void grpkj_initialize ( int *d_comp, double *d_elem, const int nx_gr, const int ny_gr, const int nx_pkj, const int ny_pkj, const int num_grpkj, const int *d_label_grpkj, const int *d_num_syn_gr, const double *d_x, const double *d_z, hiprandState_t *S, const int *d_num_syn_pkj ) { int id = threadIdx.x + blockIdx.x * blockDim.x; int n_gr = nx_gr * ny_gr; int n_pkj = nx_pkj * ny_pkj; if ( id < n_gr ) { int l_num_syn = d_num_syn_gr [ id ]; // # of synapses from id-th GrC int s_syn_id = 0; // # of synapses from GrCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_gr [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_gr = id; int l_n_pkj = d_label_grpkj [ l_n_gr * n_pkj + i - s_syn_id ]; int ix_gr = l_n_gr % nx_gr; int ix_pkj = l_n_pkj % nx_pkj; int iy_gr = l_n_gr / nx_gr; int iy_pkj = l_n_pkj / nx_pkj; double diff_x = ix_gr * 16.0 - ( 8.0 + ix_pkj * 32.0 ); // x Distance between GrC and GoC somas double diff_y = iy_gr * 16.0 - ( 8.0 + iy_pkj * 32.0 ); // y Distance between GrC and GoC somas int gr_ax_comp = 250 + 77 + ( int ) ( diff_y / 10.0 ); int pkj_d_comp = -1; double min_x = 1000000.0; int guarantee = 1; //if 
( ix_pkj < 3 || ( nx_pkj - ix_pkj ) < 4 ) { guarantee = 3; } //if ( iy_pkj < 3 || ( ny_pkj - iy_pkj ) < 4 ) { guarantee = 3; } for ( int i_cp = 0; i_cp < N_Syn_Per_GRPKJ; i_cp++ ) { double f = hiprand_uniform ( & ( S [ i * N_Syn_Per_GRPKJ + i_cp ] ) ) - 0.5; for ( int i_comp = 0; i_comp < PKJ_COMP; i_comp++ ) { if ( fabs ( d_x [ i_comp ] - diff_x + 10.0 * f ) < min_x ) { min_x = fabs ( d_x [ i_comp ] - diff_x + 10.0 * f ); pkj_d_comp = i_comp; } } // Debug if ( pkj_d_comp < 0 || l_n_pkj < 0 || fabs ( diff_y ) > 220.0 || gr_ax_comp > 577 ) { printf ( "error in grpkj_initialize\n" ); } //if ( id < 2 ) printf ("%d -> %d \n", gr_ax_comp + GR_COMP * l_n_gr, pkj_d_comp + PKJ_COMP * l_n_pkj ); d_comp [ pre_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = gr_ax_comp + GR_COMP * l_n_gr; d_comp [ post_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = pkj_d_comp + PKJ_COMP * l_n_pkj; //if ( id < 2 ) printf ("%d -> %d \n", // d_comp [ pre_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ], // d_comp [ post_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] ); d_elem [ grpkj_ampa * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 0.0; d_elem [ grpkj_weight * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = W_GRPKJ / ( 512.0 * d_num_syn_pkj [ l_n_pkj ] * 1.0 * guarantee / 10.0 ); d_elem [ grpkj_val * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 0.0; d_elem [ grpkj_old_v * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 1000.0; } } } } __host__ synapse_t *grpkj_create ( const int nx_gr, const int ny_gr, const int nx_pkj, const int ny_pkj ) { int n_gr = nx_gr * ny_gr; int n_pkj = nx_pkj * ny_pkj; int max_n_grpkj = n_pkj * n_gr; synapse_t *d_grpkj = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_pkj * n_gr == 0 ) { d_grpkj -> n = 0; printf ( "# of grpkj = 0\n" ); return d_grpkj; } // read PKJ_compartments location FILE *f_ = fopen ( "Pkj_location_info.csv", "r" ); if ( ! 
f_ ) { fprintf ( stderr, "no such file %s\n", "Pkj_location_info.csv" ); exit ( 1 ); } int type [ PKJ_COMP ]; double x [ PKJ_COMP ]; double z [ PKJ_COMP ]; int i1, i5; double i2, i3, i4; for ( int i = 0; i < PKJ_COMP; i++ ) { if ( fscanf ( f_, "%d,%lf,%lf,%lf,%d", &i1, &i2, &i3, &i4, &i5 ) == ( EOF ) ) { printf ( "PARAM_FILE_READING_ERROR\n" ); exit ( 1 ); } //l_comp [ i ] = i1; x [ i ] = i2; //y [ i ] = i3; z [ i ] = i4; type [ i ] = i5; } fclose ( f_ ); double *d_x, *d_z; hipMalloc ( ( double ** ) & ( d_x ), PKJ_COMP * sizeof ( double ) ); hipMalloc ( ( double ** ) & ( d_z ), PKJ_COMP * sizeof ( double ) ); hipMemcpy ( d_x, x, PKJ_COMP * sizeof ( double ), hipMemcpyHostToDevice ); hipMemcpy ( d_z, z, PKJ_COMP * sizeof ( double ), hipMemcpyHostToDevice ); int *label_grpkj = ( int * ) malloc ( max_n_grpkj * sizeof ( int ) ); // PC labels are connected by each GrC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GrC int *num_syn_pkj = ( int * ) malloc ( n_pkj * sizeof ( int ) ); // # of synapses from each PC reset_array ( label_grpkj, max_n_grpkj ); reset_zero ( num_syn_gr, n_gr ); reset_zero ( num_syn_pkj, n_pkj ); int num_grpkj = 0; for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate int l_count = 0; for ( int i_pkj = 0; i_pkj < n_pkj; i_pkj++ ) { double lx_pkj = ( int ) ( i_pkj % nx_pkj ) * 32.0 + 8.0; // i_pkj's x-coordinate double ly_pkj = ( int ) ( i_pkj / nx_pkj ) * 32.0 + 8.0; // i_pkj's y-coordinate if ( abs ( lx_pkj - lx_gr ) < 120.0 && abs ( ly_pkj - ly_gr ) < 220.0 ) { label_grpkj [ i_gr * n_pkj + l_count ] = i_pkj; l_count ++; num_syn_pkj [ i_pkj ]++; } } num_syn_gr [ i_gr ] = l_count; num_grpkj += l_count * N_Syn_Per_GRPKJ; // Debug if ( l_count / N_Syn_Per_GRPKJ > n_pkj ) { printf ( "Error in grpkj_create\n" ); exit ( 1 ); } //printf ("num_grpkj -> %d\n", num_grpkj); } //Debug int l_max = -1; int l_min = 100000; for ( int i_pkj = 0; i_pkj < n_pkj; i_pkj++ ) { if ( l_max < num_syn_pkj [ i_pkj ] ){ l_max = num_syn_pkj [i_pkj]; } if ( l_min > num_syn_pkj [ i_pkj ] ){ l_min = num_syn_pkj [i_pkj]; } } printf ("max grpkj per cell = %d, min = %d\n", l_max, l_min); d_grpkj -> n = num_grpkj; hipMalloc ( ( int ** ) & ( d_grpkj -> comp ), syn_n_comp * num_grpkj * sizeof ( int ) ); hipMalloc ( ( double ** ) & ( d_grpkj -> elem ), grpkj_n_elem * num_grpkj * sizeof ( double ) ); printf ( "# of grpkj = %d\n", d_grpkj -> n ); // Copy host array to device array int *d_label_grpkj; int *d_num_syn_gr, *d_num_syn_pkj; hipMalloc ( ( int ** ) & ( d_label_grpkj ), max_n_grpkj * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); hipMalloc ( ( int ** ) & ( d_num_syn_pkj ), n_pkj * sizeof ( int ) ); hipMemcpy ( d_label_grpkj, label_grpkj, max_n_grpkj * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), hipMemcpyHostToDevice ); hipMemcpy ( d_num_syn_pkj, num_syn_pkj, n_pkj * sizeof ( int ), hipMemcpyHostToDevice ); hiprandState_t *l_state; hipMalloc ( ( void ** ) &( l_state ), num_grpkj * sizeof ( hiprandState_t ) ); hipLaunchKernelGGL(( setCurand) , dim3(( num_grpkj + 127 ) / 128), dim3(128) , 0, 0, rand (), l_state, num_grpkj ); hipLaunchKernelGGL(( grpkj_initialize) , dim3(( ( n_gr ) + 127 ) / 128), dim3(128) , 0, 0, d_grpkj -> comp, d_grpkj -> elem, nx_gr, ny_gr, nx_pkj, ny_pkj, num_grpkj, d_label_grpkj, d_num_syn_gr, d_x, d_z, l_state, 
d_num_syn_pkj ); hipDeviceSynchronize(); // Debug //printf ("\nDebug for grpkj\n"); //printf_GPU <<< 1, 1 >>> ( d_grpkj -> comp, d_grpkj -> elem, num_grpkj, syn_n_comp, grpkj_n_elem ); //hipDeviceSynchronize(); free ( label_grpkj ); free ( num_syn_gr ); free ( num_syn_pkj ); hipFree ( d_label_grpkj ); hipFree ( d_num_syn_gr ); hipFree ( d_num_syn_pkj ); hipFree ( d_x ); hipFree ( d_z ); return d_grpkj; } __global__ static void grpkj_update ( int *grpkj_comp, double *grpkj_elem, const int num_grpkj, neuron_t *d_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_grpkj ) { int firing_flag = 0; int pre_num = grpkj_comp [ pre_comp * num_grpkj + id ]; double pre_comp_v = d_gr -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && grpkj_elem [ grpkj_old_v * num_grpkj + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.98505259729 //tau = 8.3 //grpkj_elem [ grpkj_ampa * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * 0.98505259729 + firing_flag; // Decay = exp(-0.125 / tau ) = 0.81193634615 //tau = 0.6 = exp(-0.25 / tau ) = 0.6592406302 grpkj_elem [ grpkj_ampa * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * D_GRPKJ_AMPA + firing_flag;// tau = 0.6 (Llano et al., 1991) grpkj_elem [ grpkj_val * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * grpkj_elem [ grpkj_weight * num_grpkj + id ] * G_GRPKJ_AMPA; grpkj_elem [ grpkj_old_v * num_grpkj + id ] = pre_comp_v; // Debug //printf ("ampa = %f, val = %f\n",grpkj_elem [ grpkj_ampa * num_grpkj + id ],grpkj_elem [ grpkj_weight * num_grpkj + id ]); } } __host__ void grpkj_finalize ( synapse_t *d_grpkj, const int n_grpkj ) { if ( n_grpkj > 0 ) { hipFree ( d_grpkj -> comp ); hipFree ( d_grpkj -> elem ); } free ( d_grpkj ); } //////////////////////////////// MLIPKJ ////////////////////////////// __global__ static void mlipkj_initialize ( int *d_comp, double *d_elem, const int num_mlipkj, const int n_pkj, const int *d_postcomp_mlipkj ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_pkj ) { for( int j = 0; j < N_Syn_Per_MLIPKJ - 1; j++ ) { d_comp [ pre_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = id; // not use d_comp [ post_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = d_postcomp_mlipkj [ j ] + id * PKJ_COMP; d_elem [ mlipkj_gaba * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; d_elem [ mlipkj_weight * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = W_MLIPKJ / ( N_Syn_Per_MLIPKJ * 1.0); d_elem [ mlipkj_val * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; } int j = N_Syn_Per_MLIPKJ - 1; d_comp [ pre_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = id; // not use d_comp [ post_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = d_postcomp_mlipkj [ j ] + id * PKJ_COMP; d_elem [ mlipkj_gaba * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; d_elem [ mlipkj_weight * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = W_MLIPKJ / ( 1.0 ); d_elem [ mlipkj_val * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; } } __host__ synapse_t *mlipkj_create ( const int n_pkj, const int n_gr ) { int num_mlipkj = n_pkj * N_Syn_Per_MLIPKJ; synapse_t *d_mlipkj = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_gr == 0 || n_pkj == 0 ) { d_mlipkj -> n = 0; printf ( "# of mlipkj = 0\n" ); return d_mlipkj; } else { d_mlipkj -> n = num_mlipkj; printf ( "# of mlipkj = %d\n", num_mlipkj ); } hipMalloc ( ( int ** ) & ( d_mlipkj -> comp ), syn_n_comp * num_mlipkj * sizeof ( int ) ); hipMalloc ( ( double ** ) & ( d_mlipkj -> elem ), mlipkj_n_elem * num_mlipkj * sizeof ( double ) ); d_mlipkj -> f_out 
= fopen ( "MLI_RASTER.csv", "w" ); int *h_postcomp_mlipkj = ( int * ) malloc ( N_Syn_Per_MLIPKJ * sizeof ( int ) ); int *d_postcomp_mlipkj; hipMalloc ( ( int ** ) & ( d_postcomp_mlipkj ), N_Syn_Per_MLIPKJ * sizeof ( int ) ); reset_array ( h_postcomp_mlipkj, N_Syn_Per_MLIPKJ ); for ( int i = 0; i < N_Syn_Per_MLIPKJ - 1; i++ ) { while ( 1 ) { int r = rand () % ( PKJ_COMP - 10 ); // pkj comps : 1590 ~ 1599 -> main dends or soma for ( int j = 0; j < i; j++ ) { if ( h_postcomp_mlipkj [ j ] == r ) { continue; } } h_postcomp_mlipkj [ i ] = r; break; } } h_postcomp_mlipkj [ N_Syn_Per_MLIPKJ - 1 ] = 1599; // Debug //printf ("Debug for mlipkj"); //for ( int i = 0; i < N_Syn_Per_MLIPKJ; i++ ) { printf ("%d, ", h_postcomp_mlipkj [ i ]); } //printf ("\n"); hipMemcpy ( d_postcomp_mlipkj, h_postcomp_mlipkj, N_Syn_Per_MLIPKJ * sizeof ( int ), hipMemcpyHostToDevice ); hipLaunchKernelGGL(( mlipkj_initialize) , dim3(( ( n_pkj ) + 127 ) / 128), dim3(128) , 0, 0, d_mlipkj -> comp, d_mlipkj -> elem, num_mlipkj, n_pkj, d_postcomp_mlipkj ); // Set rand hipMalloc ( ( void ** ) &( d_mlipkj -> cstate ), num_mlipkj * sizeof ( hiprandState_t ) ); hipLaunchKernelGGL(( setCurand) , dim3(( num_mlipkj + 127 ) / 128), dim3(128) , 0, 0, rand (), d_mlipkj -> cstate, num_mlipkj ); free ( h_postcomp_mlipkj ); hipFree ( d_postcomp_mlipkj ); return d_mlipkj; } __global__ void mlipkj_update ( int *mlipkj_comp, double *mlipkj_elem, const double t, const int num_mlipkj, hiprandState_t *S ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_mlipkj ) { int firing_flag; double fr_mli = 30.0; //( S_Stimuli <= t && t < E_Stimuli + 200.0 )? fr_mli = 150.0 : fr_mli = 30.0; //( 0 <= t && t < 500.0 )? fr_mli = 0.0 : ( S_Stimuli + 50.0 <= t && t < E_Stimuli + 50.0 )? fr_mli = 30.0: //( E_Stimuli <= t && t < E_Stimuli + 50.0 )? fr_mli = 80.0: fr_mli = 30.0; //( S_Stimuli + 100.0 <= t && t < E_Stimuli )? fr_mli = 50.0: //( E_Stimuli <= t && t < E_Stimuli + 100.0 )? fr_mli = 150.0: double f = hiprand_uniform ( & ( S [ id ] ) ); ( fr_mli * 0.125 * 0.001 > f )? 
firing_flag = 1 : firing_flag = 0; // Decay :exp( - DT / tau ) = 0.98757780049 ( DT = 0.125 )//tau = 10.0 mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] = mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] * 0.98757780049 + firing_flag; mlipkj_elem [ mlipkj_val * num_mlipkj + id ] = mlipkj_elem [ mlipkj_weight * num_mlipkj + id ] * ( G_MLIPKJ_GABA * mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] ); } } __host__ void mlipkj_finalize ( synapse_t *d_mlipkj , const int n ) { if ( n > 0 ) { hipFree ( d_mlipkj -> comp ); hipFree ( d_mlipkj -> elem ); hipFree ( d_mlipkj -> cstate ); fclose ( d_mlipkj -> f_out ); } free ( d_mlipkj ); } __host__ void mli_output_file ( synapse_t *d_mlipkj, const double t, neuron_t *p_pkj ) { FILE *f = d_mlipkj -> f_out; double *ret = ( double * ) malloc ( sizeof ( double ) * mlipkj_n_elem * d_mlipkj -> n ); hipMemcpy ( ret, d_mlipkj -> elem, mlipkj_n_elem * d_mlipkj -> n * sizeof ( double ), hipMemcpyDeviceToHost ); double val = 0.0; fprintf ( f, "%lf,", t ); for ( int j = 0; j < d_mlipkj -> n; j++ ) { val = G_MLIPKJ_GABA * ret [ mlipkj_gaba * d_mlipkj -> n + j ]; val *= ret [ mlipkj_weight * d_mlipkj -> n + j ] *1000000; fprintf ( f, "%lf,", val ); } fprintf ( f, "\n" ); free ( ret ); } ///////////////////////////////////////////////////////////////////////// __host__ void gr_synapse_update ( const double t, const double DT, synapse_t *d_mfgr, synapse_t *d_gogr, neuron_t *d_go, neuron_solve_t *p_gr_solve ) { //static int count = 0; if ( 0 == strncmp ( p_gr_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ hipLaunchKernelGGL(( mfgr_update) , dim3(( d_mfgr -> n + 127 ) / 128), dim3(128) , 0, 0, d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) {hipLaunchKernelGGL(( gogr_update) , dim3(( d_gogr -> n + 127 ) / 128), dim3(128) , 0, 0, d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_gr_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ hipLaunchKernelGGL(( mfgr_update) , dim3(( d_mfgr -> n + 127 ) / 128), dim3(128) , 0, 0, d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) {hipLaunchKernelGGL(( gogr_update) , dim3(( d_gogr -> n + 127 ) / 128), dim3(128) , 0, 0, d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_gr_solve -> type, "RKC", 3 ) ) { hipLaunchKernelGGL(( mfgr_update) , dim3(( d_mfgr -> n + 127 ) / 128), dim3(128) , 0, 0, d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) {hipLaunchKernelGGL(( gogr_update) , dim3(( d_gogr -> n + 127 ) / 128), dim3(128) , 0, 0, d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } } } __host__ void go_synapse_update ( const double t, const double DT, synapse_t *d_grgo, neuron_t *d_gr, neuron_solve_t *p_go_solve ) { //static int count = 0; if ( 0 == strncmp ( p_go_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grgo -> n > 0 ) {hipLaunchKernelGGL(( grgo_update) , dim3(( d_grgo -> n + 127 ) / 128), dim3(128) , 0, 0, d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_go_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grgo -> n > 0 ) {hipLaunchKernelGGL(( grgo_update) , dim3(( d_grgo -> n + 127 ) / 128), dim3(128) , 0, 0, d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_go_solve -> type, "RKC", 3 ) ) { if ( d_grgo -> n > 0 ) 
{hipLaunchKernelGGL(( grgo_update) , dim3(( d_grgo -> n + 127 ) / 128), dim3(128) , 0, 0, d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } } } __host__ void pkj_synapse_update ( const double t, const double DT, synapse_t *d_grpkj, synapse_t *d_mlipkj, neuron_t *d_gr, neuron_solve_t *p_pkj_solve ) { //static int count = 0; if ( 0 == strncmp ( p_pkj_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grpkj -> n > 0 ) {hipLaunchKernelGGL(( grpkj_update) , dim3(( d_grpkj -> n + 127 ) / 128), dim3(128) , 0, 0, d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_pkj_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grpkj -> n > 0 ) {hipLaunchKernelGGL(( grpkj_update) , dim3(( d_grpkj -> n + 127 ) / 128), dim3(128) , 0, 0, d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } //if ( d_mlipkj -> n > 0 ) { mlipkj_update <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>> ( d_mlipkj -> comp, d_mlipkj -> elem, t, d_mlipkj -> n, d_mlipkj -> cstate ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_pkj_solve -> type, "RKC", 3 ) ) { if ( d_grpkj -> n > 0 ) {hipLaunchKernelGGL(( grpkj_update) , dim3(( d_grpkj -> n + 127 ) / 128), dim3(128) , 0, 0, d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } //if ( d_mlipkj -> n > 0 ) { mlipkj_update <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>> ( d_mlipkj -> comp, d_mlipkj -> elem, t, d_mlipkj -> n, d_mlipkj -> cstate ); } } }
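/*
 * Hypothetical usage sketch (not part of the original sources). The three host
 * wrappers defined above, gr_synapse_update, go_synapse_update and
 * pkj_synapse_update, pick their kernels from the leading characters of
 * neuron_solve_t::type ("BE", "CN" or "RKC") and are presumably called once per
 * integration step from the simulator's main loop, roughly as follows; every
 * name other than the three wrappers themselves is an assumption made for
 * illustration.
 *
 *   for ( double t = 0.0; t < t_end; t += DT ) {   // t_end and DT are assumed names
 *     gr_synapse_update  ( t, DT, d_mfgr,  d_gogr,   d_go, p_gr_solve  );
 *     go_synapse_update  ( t, DT, d_grgo,  d_gr,           p_go_solve  );
 *     pkj_synapse_update ( t, DT, d_grpkj, d_mlipkj, d_gr, p_pkj_solve );
 *     // ...neuron-state integration kernels would run here...
 *   }
 */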
c0e0fb6c7a74e645e5f211eb3bcf31730a1d5653.cu
#include "syn.cuh" #include <time.h> #include <math.h> #include <curand.h> #include <curand_kernel.h> // Debug __global__ static void printf_GPU ( const int *comp, const double *elem, const int n_syn, const int n_comp, const int n_elem ) { for ( int i = 0; i < n_syn; i++ ) { //printf ( "Pre = %d, PostNum = %d, PostComp = %d\n", comp [ i ] / 578, comp [ n_syn + i ] % 1600, comp [ n_syn + i ] / 1600 ); printf ( "Pre = %d, Post = %d\n", comp [ i ], comp [ n_syn + i ] ); for ( int j = 0; j < n_elem; j++ ) { printf ( "%f, ", elem [ j * n_syn + i ] ); } printf ("\n"); } } __host__ __device__ static void reset_array ( int * array, int num ) { for ( int i = 0; i < num; i++ ) { array [ i ] = -1; } } __host__ __device__ static void reset_zero ( int * array, int num ) { for ( int i = 0; i < num; i++ ) { array [ i ] = 0; } } __global__ void setCurand ( unsigned long seed, curandState *state, const int num ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num ) { curand_init ( seed, id, 0, & ( state [ id ] ) ); } } //////////////////////////////// MFGR ////////////////////////////// __global__ static void mfgr_initialize ( int *d_comp, double *d_elem, const int num_mfgr, const int n_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_gr ) { for( int j = 0; j < 4; j++ ) { d_comp [ pre_comp * num_mfgr + id * 4 + j ] = id; // not use d_comp [ post_comp * num_mfgr + id * 4 + j ] = j + id * GR_COMP; d_elem [ mfgr_ampa * num_mfgr + id * 4 + j ] = 0.0; d_elem [ mfgr_nmda * num_mfgr + id * 4 + j ] = 0.0; d_elem [ mfgr_weight * num_mfgr + id * 4 + j ] = W_MFGR / 4.0; d_elem [ mfgr_val * num_mfgr + id * 4 + j ] = 0.0; } } } __host__ synapse_t *mfgr_create ( const int n_gr ) { int num_mfgr = n_gr * 4; synapse_t *d_mfgr = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); d_mfgr -> n = num_mfgr; if ( num_mfgr == 0 ) { printf ( "# of mfgr = 0\n" ); return d_mfgr; } else { printf ( "# of mfgr = %d\n", num_mfgr ); } cudaMalloc ( ( int ** ) & ( d_mfgr -> comp ), syn_n_comp * num_mfgr * sizeof ( int ) ); cudaMalloc ( ( double ** ) & ( d_mfgr -> elem ), mfgr_n_elem * num_mfgr * sizeof ( double ) ); d_mfgr -> f_out = fopen ( "MF_RASTER.csv", "w" ); mfgr_initialize <<< ( ( n_gr ) + 127 ) / 128, 128 >>> ( d_mfgr -> comp, d_mfgr -> elem, num_mfgr, n_gr ); // Set rand cudaMalloc ( ( void ** ) &( d_mfgr -> cstate ), num_mfgr * sizeof ( curandState ) ); setCurand <<< ( num_mfgr + 127 ) / 128, 128 >>> ( rand (), d_mfgr -> cstate, num_mfgr ); return d_mfgr; } __global__ void mfgr_update ( int *mfgr_comp, double *mfgr_elem, const double t, const int num_mfgr, curandState *S ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_mfgr ) { int firing_flag; double fr_mf = 5.0; //( 0 <= t && t < 250 )? fr_mf = 60.0 : fr_mf = 5.0; ( S_Stimuli <= t && t < E_Stimuli )? fr_mf = 60.0 : fr_mf = 5.0; double f = curand_uniform ( & ( S [ id ] ) ); ( fr_mf * RKC_DT * 0.001 > f )? 
firing_flag = 1 : firing_flag = 0; // Decay :exp( - DT / tau ) = 0.90107510572 ( DT = 0.125, tau = 1.2 )// gmax = 24000e-9, 0.81193634615 (DT = 0.025) mfgr_elem [ mfgr_ampa * num_mfgr + id ] = mfgr_elem [ mfgr_ampa * num_mfgr + id ] * D_MFGR_AMPA + firing_flag; // Decay :exp( - DT / tau ) = 0.99759904077 ( DT = 0.125, tau = 52 ) // gmax = 32000e-9, 0.99520384614 (DT = 0.025) mfgr_elem [ mfgr_nmda * num_mfgr + id ] = mfgr_elem [ mfgr_ampa * num_mfgr + id ] * D_MFGR_NMDA + firing_flag; mfgr_elem [ mfgr_val * num_mfgr + id ] = mfgr_elem [ mfgr_weight * num_mfgr + id ] * ( G_MFGR_AMPA * mfgr_elem [ mfgr_ampa * num_mfgr + id ] + G_MFGR_NMDA * mfgr_elem [ mfgr_nmda * num_mfgr + id ] ); // 0.88 : 0.12 //int l_comp = mfgr_comp [ post_comp * num_mfgr + id ]; // d_gr -> elem [ g_syn ] [ l_comp ] = mfgr_elem [ mfgr_val * num_mfgr + id ]; // +=大丈夫 } } __host__ void mf_output_file ( synapse_t *d_mfgr, const double t, neuron_t *p_gr ) { FILE *f = d_mfgr -> f_out; double *ret = ( double * ) malloc ( sizeof ( double ) * mfgr_n_elem * d_mfgr -> n ); cudaMemcpy ( ret, d_mfgr -> elem, mfgr_n_elem * d_mfgr -> n * sizeof ( double ), cudaMemcpyDeviceToHost ); double val = 0.0; fprintf ( f, "%lf,", t ); for ( int j = 0; j < d_mfgr -> n; j++ ) { val = G_MFGR_AMPA * 0.88 * ret [ mfgr_ampa * d_mfgr -> n + j ] + G_MFGR_NMDA * 0.12 * ret [ mfgr_nmda * d_mfgr -> n + j ]; val *= ret [ mfgr_weight * d_mfgr -> n + j ] *1000000; fprintf ( f, "%lf,", val ); } fprintf ( f, "\n" ); free ( ret ); } __host__ void mfgr_finalize ( synapse_t *d_mfgr , const int n_gr ) { if ( n_gr > 0 ) { cudaFree ( d_mfgr -> comp ); cudaFree ( d_mfgr -> elem ); cudaFree ( d_mfgr -> cstate ); fclose ( d_mfgr -> f_out ); } free ( d_mfgr ); } //////////////////////////////// GRGO ////////////////////////////// __global__ static void grgo_initialize ( int *d_comp, double *d_elem, const int n_gr, const int nx_gr, const int ny_gr, const int nx_go, const int ny_go, const int num_grgo, const int *d_label_gogr, const int *d_num_syn_gr, const int *d_num_syn_go ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_gr ) { int l_num_syn = d_num_syn_gr [ id ]; // # of synapses from id-th GrC int s_syn_id = 0; // # of synapses from GrCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_gr [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_gr = id; int l_n_go = d_label_gogr [ l_n_gr * ny_go + i - s_syn_id ]; int iy_gr = l_n_gr / nx_gr; int iy_go = l_n_go / nx_go; float diff_grgo = 8.0 + iy_go * 32.0 - iy_gr * 16.0; // Distance between GrC and GoC somas int gr_ax_comp = 250 + 77 - ( int ) ( diff_grgo / 10.0 ); // Debug if ( l_n_go < 0 || fabs ( diff_grgo ) > 220.0 || gr_ax_comp > 577 ) { printf ( "error in grgo_initialize\n" ); } d_comp [ pre_comp * num_grgo + i ] = gr_ax_comp + GR_COMP * l_n_gr; d_comp [ post_comp * num_grgo + i ] = 0 + GO_COMP * l_n_go; // soma d_elem [ grgo_ampa * num_grgo + i ] = 0.0; d_elem [ grgo_nmda1 * num_grgo + i ] = 0.0; d_elem [ grgo_nmda2 * num_grgo + i ] = 0.0; d_elem [ grgo_weight * num_grgo + i ] = W_GRGO / ( d_num_syn_go [ l_n_go ] * 1.0/4.0 );//W_GRGO / ( l_num_syn * 1.0 ); d_elem [ grgo_val * num_grgo + i ] = 0.0; d_elem [ grgo_old_v * num_grgo + i ] = 1000.0; } } } __host__ synapse_t *grgo_create ( const int nx_gr, const int ny_gr, const int nx_go, const int ny_go ) { int n_gr = nx_gr * ny_gr; int n_go = nx_go * ny_go; int max_n_grgo = ny_go * n_gr; synapse_t *d_grgo = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_go * n_gr == 0 ) { d_grgo -> n = n_go * n_gr; printf 
( "# of grgo = 0\n" ); return d_grgo; } int *label_grgo = ( int * ) malloc ( max_n_grgo * sizeof ( int ) ); // GoC labels are connected by each GrC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GrC int *num_syn_go = ( int * ) malloc ( n_go * sizeof ( int ) ); // # of synapses from each GoC reset_array ( label_grgo, max_n_grgo ); reset_zero ( num_syn_gr, n_gr ); reset_zero ( num_syn_go, n_go ); int num_grgo = 0; for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate int l_count = 0; for ( int i_go = 0; i_go < n_go; i_go++ ) { double lx_go = ( int ) ( i_go % nx_go ) * 32.0 + 8.0; // i_go's x-coordinate double ly_go = ( int ) ( i_go / nx_go ) * 32.0 + 8.0; // i_go's y-coordinate if ( abs ( lx_go - lx_gr ) < 16.0 && abs ( ly_go - ly_gr ) < 220.0 ) { label_grgo [ i_gr * ny_go + l_count ] = i_go; l_count++; num_syn_go [ i_go ]++; } } num_syn_gr [ i_gr ] = l_count; num_grgo += l_count; // Debug if ( l_count > ny_go ) { printf ( "Error in grgo_create\n" ); exit ( 1 ); } } d_grgo -> n = num_grgo; cudaMalloc ( ( int ** ) & ( d_grgo -> comp ), syn_n_comp * num_grgo * sizeof ( int ) ); cudaMalloc ( ( double ** ) & ( d_grgo -> elem ), grgo_n_elem * num_grgo * sizeof ( double ) ); printf ( "# of grgo = %d\n", d_grgo -> n ); // Copy host array to device array int *d_label_grgo; int *d_num_syn_gr; int *d_num_syn_go; cudaMalloc ( ( int ** ) & ( d_label_grgo ), max_n_grgo * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_go ), n_go * sizeof ( int ) ); cudaMemcpy ( d_label_grgo, label_grgo, max_n_grgo * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_go, num_syn_go, n_go * sizeof ( int ), cudaMemcpyHostToDevice ); grgo_initialize <<< ( ( n_gr ) + 127 ) / 128, 128 >>> ( d_grgo -> comp, d_grgo -> elem, n_gr, nx_gr, ny_gr, nx_go, ny_go, num_grgo, d_label_grgo, d_num_syn_gr, d_num_syn_go ); // Debug //printf ("\nDebug for grgo"); //printf_GPU <<< 1, 1 >>> ( d_grgo -> comp, d_grgo -> elem, num_grgo, syn_n_comp, grgo_n_elem ); //cudaDeviceSynchronize(); free ( label_grgo ); free ( num_syn_gr ); free ( num_syn_go ); cudaFree ( d_label_grgo ); cudaFree ( d_num_syn_gr ); cudaFree ( d_num_syn_go ); return d_grgo; } __global__ static void grgo_update ( int *grgo_comp, double *grgo_elem, const int num_grgo, neuron_t *d_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_grgo ) { int firing_flag = 0; int pre_num = grgo_comp [ pre_comp * num_grgo + id ]; double pre_comp_v = d_gr -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && grgo_elem [ grgo_old_v * num_grgo + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.92004441462 //tau = 1.5 = exp(-0.25 / tau ) = 0.84648172489 grgo_elem [ grgo_ampa * num_grgo + id ] = grgo_elem [ grgo_ampa * num_grgo + id ] * D_GRGO_AMPA + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99597586057 //tau = 31 = exp(-0.25 / tau ) = 0.99196791484 grgo_elem [ grgo_nmda1 * num_grgo + id ] = grgo_elem [ grgo_nmda1 * num_grgo + id ] * D_GRGO_NMDA1 + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99926497614 //tau = 170 = exp(-0.25 / tau ) = 0.99853049255 grgo_elem [ grgo_nmda2 * num_grgo + id ] = grgo_elem [ grgo_nmda2 * num_grgo + id ] * D_GRGO_NMDA2 + firing_flag; grgo_elem [ grgo_val * num_grgo + id ] = grgo_elem 
[ grgo_weight * num_grgo + id ] * ( G_GRGO_AMPA * grgo_elem [ grgo_ampa * num_grgo + id ] + G_GRGO_NMDA * ( 0.33 * grgo_elem [ grgo_nmda1 * num_grgo + id ] + 0.67 * grgo_elem [ grgo_nmda2 * num_grgo + id ] ) ); grgo_elem [ grgo_old_v * num_grgo + id ] = pre_comp_v; } } __host__ void grgo_finalize ( synapse_t *d_grgo, const int n_grgo ) { if ( n_grgo > 0 ) { cudaFree ( d_grgo -> comp ); cudaFree ( d_grgo -> elem ); //fclose ( d_grgo -> f_out ); } free ( d_grgo ); } //////////////////////////////// GOGR ////////////////////////////// __global__ static void gogr_initialize3 ( int *d_comp, double *d_elem, const int n_go, const int n_gr, const int num_gogr, const int *d_label_gogr, const int *d_num_syn_go, const int *d_num_syn_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_go ) { int l_num_syn = d_num_syn_go [ id ]; // # of synapses from id-th GoC int s_syn_id = 0; // # of synapses from GoCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_go [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_go = id; int l_n_gr = d_label_gogr [ l_n_go * 4 * 4 + i - s_syn_id ]; // Debug if ( l_n_gr < 0 ) { printf ( "error in gogr_initialize\n" ); } d_comp [ pre_comp * num_gogr + i ] = ( GO_COMP_DEND * 3 ) + GO_COMP * l_n_go; d_comp [ post_comp * num_gogr + i ] = 16 + GR_COMP * l_n_gr; // soma d_elem [ gogr_gabaa * num_gogr + i ] = 0.0; d_elem [ gogr_gabab * num_gogr + i ] = 0.0; d_elem [ gogr_weight * num_gogr + i ] = W_GOGR / ( d_num_syn_gr [ l_n_gr ] * 4.0 ); d_elem [ gogr_val * num_gogr + i ] = 0.0; d_elem [ gogr_old_v * num_gogr + i ] = 1000.0; } } } __host__ synapse_t *gogr_create ( const int nx_go, const int ny_go, const int nx_gr, const int ny_gr ) { int n_go = nx_go * ny_go; int n_gr = nx_gr * ny_gr; int max_n_gogr = n_go * 4 * 4; synapse_t *d_gogr = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_go * n_gr == 0 ) { d_gogr -> n = n_go * n_gr; printf ( "# of gogr = 0\n" ); return d_gogr; } int *label_gogr = ( int * ) malloc ( max_n_gogr * sizeof ( int ) ); // GrC labels are connected by each GoC int *num_syn_go = ( int * ) malloc ( n_go * sizeof ( int ) ); // # of synapses from each GoC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GoC reset_array ( label_gogr, max_n_gogr ); reset_zero ( num_syn_go, n_go ); reset_zero ( num_syn_gr, n_gr ); int num_gogr = 0; for ( int i_go = 0; i_go < n_go; i_go++ ) { int l_count = 0; double lx_go = ( int ) ( i_go % nx_go ) * 32.0 + 8.0; // i_go's x-coordinate double ly_go = ( int ) ( i_go / nx_go ) * 32.0 + 8.0; // i_go's y-coordinate for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate if ( fabs ( lx_go - lx_gr ) < 32.0 && fabs ( ly_go - ly_gr ) < 32.0 ) { label_gogr [ i_go * 4 * 4 + l_count ] = i_gr; l_count++; num_syn_gr [ i_gr ]++; } } num_syn_go [ i_go ] = l_count; num_gogr += l_count; // Debug if ( l_count > 4 * n_gr ) { printf ( "Error in gogr_create\n" ); exit ( 1 ); } } d_gogr -> n = num_gogr; cudaMalloc ( ( int ** ) & ( d_gogr -> comp ), syn_n_comp * num_gogr * sizeof ( int ) ); cudaMalloc ( ( double ** ) & ( d_gogr -> elem ), gogr_n_elem * num_gogr * sizeof ( double ) ); printf ( "# of gogr = %d\n", d_gogr -> n ); // Copy host array to device array int *d_label_gogr; int *d_num_syn_go, *d_num_syn_gr; cudaMalloc ( ( int ** ) & ( d_label_gogr ), max_n_gogr * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_go ), 
n_go * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); cudaMemcpy ( d_label_gogr, label_gogr, max_n_gogr * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_go, num_syn_go, n_go * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), cudaMemcpyHostToDevice ); gogr_initialize3 <<< ( ( n_go ) + 127 ) / 128, 128 >>> ( d_gogr -> comp, d_gogr -> elem, n_go, n_gr, num_gogr, d_label_gogr, d_num_syn_go, d_num_syn_gr ); free ( label_gogr ); free ( num_syn_go ); free ( num_syn_gr ); cudaFree ( d_label_gogr ); cudaFree ( d_num_syn_go ); cudaFree ( d_num_syn_gr ); // Debug //printf ("\nDebug for gogr"); //printf_GPU <<< 1, 1 >>> ( d_gogr -> comp, d_gogr -> elem, num_gogr, syn_n_comp, gogr_n_elem ); //cudaDeviceSynchronize(); return d_gogr; } __global__ static void gogr_update ( int *gogr_comp, double *gogr_elem, const int num_gogr, neuron_t *d_go ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_gogr ) { int firing_flag = 0; int pre_num = gogr_comp [ pre_comp * num_gogr + id ]; double pre_comp_v = d_go -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && gogr_elem [ gogr_old_v * num_gogr + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.9823013511 //tau = 7 = exp(-0.25 / tau ) = 0.96491594437 gogr_elem [ gogr_gabaa * num_gogr + id ] = gogr_elem [ gogr_gabaa * num_gogr + id ] * D_GOGR_GABAA + firing_flag; // Decay = exp(-0.125 / tau ) = 0.99788359867 //tau = 59 = exp(-0.25 / tau ) = 0.9957716765 gogr_elem [ gogr_gabab * num_gogr + id ] = gogr_elem [ gogr_gabab * num_gogr + id ] * D_GOGR_GABAB + firing_flag; gogr_elem [ gogr_val * num_gogr + id ] = gogr_elem [ gogr_weight * num_gogr + id ] * G_GOGR_GABA * ( 0.43 * gogr_elem [ gogr_gabaa * num_gogr + id ] + 0.57 * gogr_elem [ gogr_gabab * num_gogr + id ] ); gogr_elem [ gogr_old_v * num_gogr + id ] = pre_comp_v; //int l_comp = gogr_comp [ post_comp * num_gogr + id ]; //d_gr -> elem [ g_syn ] [ l_comp ] += l_val; // +=大丈夫 } } __host__ void gogr_finalize ( synapse_t *d_gogr, const int n_gogr ) { if ( n_gogr > 0 ) { cudaFree ( d_gogr -> comp ); cudaFree ( d_gogr -> elem ); //cudaFree ( d_gogr -> cstate ); //fclose ( d_gogr -> f_out ); } free ( d_gogr ); } //////////////////////////////// GRPKJ ////////////////////////////// __global__ static void grpkj_initialize ( int *d_comp, double *d_elem, const int nx_gr, const int ny_gr, const int nx_pkj, const int ny_pkj, const int num_grpkj, const int *d_label_grpkj, const int *d_num_syn_gr, const double *d_x, const double *d_z, curandState *S, const int *d_num_syn_pkj ) { int id = threadIdx.x + blockIdx.x * blockDim.x; int n_gr = nx_gr * ny_gr; int n_pkj = nx_pkj * ny_pkj; if ( id < n_gr ) { int l_num_syn = d_num_syn_gr [ id ]; // # of synapses from id-th GrC int s_syn_id = 0; // # of synapses from GrCs 0 to (id-1) for ( int j = 0; j < id; j++ ) { s_syn_id += d_num_syn_gr [ j ]; } for ( int i = s_syn_id; i < s_syn_id + l_num_syn; i++ ) { int l_n_gr = id; int l_n_pkj = d_label_grpkj [ l_n_gr * n_pkj + i - s_syn_id ]; int ix_gr = l_n_gr % nx_gr; int ix_pkj = l_n_pkj % nx_pkj; int iy_gr = l_n_gr / nx_gr; int iy_pkj = l_n_pkj / nx_pkj; double diff_x = ix_gr * 16.0 - ( 8.0 + ix_pkj * 32.0 ); // x Distance between GrC and GoC somas double diff_y = iy_gr * 16.0 - ( 8.0 + iy_pkj * 32.0 ); // y Distance between GrC and GoC somas int gr_ax_comp = 250 + 77 + ( int ) ( diff_y / 10.0 ); int pkj_d_comp = -1; double min_x = 1000000.0; int guarantee = 1; //if ( ix_pkj < 3 || ( nx_pkj - ix_pkj ) 
< 4 ) { guarantee = 3; } //if ( iy_pkj < 3 || ( ny_pkj - iy_pkj ) < 4 ) { guarantee = 3; } for ( int i_cp = 0; i_cp < N_Syn_Per_GRPKJ; i_cp++ ) { double f = curand_uniform ( & ( S [ i * N_Syn_Per_GRPKJ + i_cp ] ) ) - 0.5; for ( int i_comp = 0; i_comp < PKJ_COMP; i_comp++ ) { if ( fabs ( d_x [ i_comp ] - diff_x + 10.0 * f ) < min_x ) { min_x = fabs ( d_x [ i_comp ] - diff_x + 10.0 * f ); pkj_d_comp = i_comp; } } // Debug if ( pkj_d_comp < 0 || l_n_pkj < 0 || fabs ( diff_y ) > 220.0 || gr_ax_comp > 577 ) { printf ( "error in grpkj_initialize\n" ); } //if ( id < 2 ) printf ("%d -> %d \n", gr_ax_comp + GR_COMP * l_n_gr, pkj_d_comp + PKJ_COMP * l_n_pkj ); d_comp [ pre_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = gr_ax_comp + GR_COMP * l_n_gr; d_comp [ post_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = pkj_d_comp + PKJ_COMP * l_n_pkj; //if ( id < 2 ) printf ("%d -> %d \n", // d_comp [ pre_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ], // d_comp [ post_comp * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] ); d_elem [ grpkj_ampa * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 0.0; d_elem [ grpkj_weight * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = W_GRPKJ / ( 512.0 * d_num_syn_pkj [ l_n_pkj ] * 1.0 * guarantee / 10.0 ); d_elem [ grpkj_val * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 0.0; d_elem [ grpkj_old_v * num_grpkj + i * N_Syn_Per_GRPKJ + i_cp ] = 1000.0; } } } } __host__ synapse_t *grpkj_create ( const int nx_gr, const int ny_gr, const int nx_pkj, const int ny_pkj ) { int n_gr = nx_gr * ny_gr; int n_pkj = nx_pkj * ny_pkj; int max_n_grpkj = n_pkj * n_gr; synapse_t *d_grpkj = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_pkj * n_gr == 0 ) { d_grpkj -> n = 0; printf ( "# of grpkj = 0\n" ); return d_grpkj; } // read PKJ_compartments location FILE *f_ = fopen ( "Pkj_location_info.csv", "r" ); if ( ! 
f_ ) { fprintf ( stderr, "no such file %s\n", "Pkj_location_info.csv" ); exit ( 1 ); } int type [ PKJ_COMP ]; double x [ PKJ_COMP ]; double z [ PKJ_COMP ]; int i1, i5; double i2, i3, i4; for ( int i = 0; i < PKJ_COMP; i++ ) { if ( fscanf ( f_, "%d,%lf,%lf,%lf,%d", &i1, &i2, &i3, &i4, &i5 ) == ( EOF ) ) { printf ( "PARAM_FILE_READING_ERROR\n" ); exit ( 1 ); } //l_comp [ i ] = i1; x [ i ] = i2; //y [ i ] = i3; z [ i ] = i4; type [ i ] = i5; } fclose ( f_ ); double *d_x, *d_z; cudaMalloc ( ( double ** ) & ( d_x ), PKJ_COMP * sizeof ( double ) ); cudaMalloc ( ( double ** ) & ( d_z ), PKJ_COMP * sizeof ( double ) ); cudaMemcpy ( d_x, x, PKJ_COMP * sizeof ( double ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_z, z, PKJ_COMP * sizeof ( double ), cudaMemcpyHostToDevice ); int *label_grpkj = ( int * ) malloc ( max_n_grpkj * sizeof ( int ) ); // PC labels are connected by each GrC int *num_syn_gr = ( int * ) malloc ( n_gr * sizeof ( int ) ); // # of synapses from each GrC int *num_syn_pkj = ( int * ) malloc ( n_pkj * sizeof ( int ) ); // # of synapses from each PC reset_array ( label_grpkj, max_n_grpkj ); reset_zero ( num_syn_gr, n_gr ); reset_zero ( num_syn_pkj, n_pkj ); int num_grpkj = 0; for ( int i_gr = 0; i_gr < n_gr; i_gr++ ) { double lx_gr = ( int ) ( i_gr % nx_gr ) * 16.0; // i_gr's x-coordinate double ly_gr = ( int ) ( i_gr / nx_gr ) * 16.0; // i_gr's y-coordinate int l_count = 0; for ( int i_pkj = 0; i_pkj < n_pkj; i_pkj++ ) { double lx_pkj = ( int ) ( i_pkj % nx_pkj ) * 32.0 + 8.0; // i_pkj's x-coordinate double ly_pkj = ( int ) ( i_pkj / nx_pkj ) * 32.0 + 8.0; // i_pkj's y-coordinate if ( abs ( lx_pkj - lx_gr ) < 120.0 && abs ( ly_pkj - ly_gr ) < 220.0 ) { label_grpkj [ i_gr * n_pkj + l_count ] = i_pkj; l_count ++; num_syn_pkj [ i_pkj ]++; } } num_syn_gr [ i_gr ] = l_count; num_grpkj += l_count * N_Syn_Per_GRPKJ; // Debug if ( l_count / N_Syn_Per_GRPKJ > n_pkj ) { printf ( "Error in grpkj_create\n" ); exit ( 1 ); } //printf ("num_grpkj -> %d\n", num_grpkj); } //Debug int l_max = -1; int l_min = 100000; for ( int i_pkj = 0; i_pkj < n_pkj; i_pkj++ ) { if ( l_max < num_syn_pkj [ i_pkj ] ){ l_max = num_syn_pkj [i_pkj]; } if ( l_min > num_syn_pkj [ i_pkj ] ){ l_min = num_syn_pkj [i_pkj]; } } printf ("max grpkj per cell = %d, min = %d\n", l_max, l_min); d_grpkj -> n = num_grpkj; cudaMalloc ( ( int ** ) & ( d_grpkj -> comp ), syn_n_comp * num_grpkj * sizeof ( int ) ); cudaMalloc ( ( double ** ) & ( d_grpkj -> elem ), grpkj_n_elem * num_grpkj * sizeof ( double ) ); printf ( "# of grpkj = %d\n", d_grpkj -> n ); // Copy host array to device array int *d_label_grpkj; int *d_num_syn_gr, *d_num_syn_pkj; cudaMalloc ( ( int ** ) & ( d_label_grpkj ), max_n_grpkj * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_gr ), n_gr * sizeof ( int ) ); cudaMalloc ( ( int ** ) & ( d_num_syn_pkj ), n_pkj * sizeof ( int ) ); cudaMemcpy ( d_label_grpkj, label_grpkj, max_n_grpkj * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_gr, num_syn_gr, n_gr * sizeof ( int ), cudaMemcpyHostToDevice ); cudaMemcpy ( d_num_syn_pkj, num_syn_pkj, n_pkj * sizeof ( int ), cudaMemcpyHostToDevice ); curandState *l_state; cudaMalloc ( ( void ** ) &( l_state ), num_grpkj * sizeof ( curandState ) ); setCurand <<< ( num_grpkj + 127 ) / 128, 128 >>> ( rand (), l_state, num_grpkj ); grpkj_initialize <<< ( ( n_gr ) + 127 ) / 128, 128 >>> ( d_grpkj -> comp, d_grpkj -> elem, nx_gr, ny_gr, nx_pkj, ny_pkj, num_grpkj, d_label_grpkj, d_num_syn_gr, d_x, d_z, l_state, d_num_syn_pkj ); cudaDeviceSynchronize(); // Debug //printf 
("\nDebug for grpkj\n"); //printf_GPU <<< 1, 1 >>> ( d_grpkj -> comp, d_grpkj -> elem, num_grpkj, syn_n_comp, grpkj_n_elem ); //cudaDeviceSynchronize(); free ( label_grpkj ); free ( num_syn_gr ); free ( num_syn_pkj ); cudaFree ( d_label_grpkj ); cudaFree ( d_num_syn_gr ); cudaFree ( d_num_syn_pkj ); cudaFree ( d_x ); cudaFree ( d_z ); return d_grpkj; } __global__ static void grpkj_update ( int *grpkj_comp, double *grpkj_elem, const int num_grpkj, neuron_t *d_gr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_grpkj ) { int firing_flag = 0; int pre_num = grpkj_comp [ pre_comp * num_grpkj + id ]; double pre_comp_v = d_gr -> elem [ v ] [ pre_num ]; if ( pre_comp_v > 0.0 && grpkj_elem [ grpkj_old_v * num_grpkj + id ] < 0.0 ) { firing_flag = 1; } // Decay = exp(-0.125 / tau ) = 0.98505259729 //tau = 8.3 //grpkj_elem [ grpkj_ampa * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * 0.98505259729 + firing_flag; // Decay = exp(-0.125 / tau ) = 0.81193634615 //tau = 0.6 = exp(-0.25 / tau ) = 0.6592406302 grpkj_elem [ grpkj_ampa * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * D_GRPKJ_AMPA + firing_flag;// tau = 0.6 (Llano et al., 1991) grpkj_elem [ grpkj_val * num_grpkj + id ] = grpkj_elem [ grpkj_ampa * num_grpkj + id ] * grpkj_elem [ grpkj_weight * num_grpkj + id ] * G_GRPKJ_AMPA; grpkj_elem [ grpkj_old_v * num_grpkj + id ] = pre_comp_v; // Debug //printf ("ampa = %f, val = %f\n",grpkj_elem [ grpkj_ampa * num_grpkj + id ],grpkj_elem [ grpkj_weight * num_grpkj + id ]); } } __host__ void grpkj_finalize ( synapse_t *d_grpkj, const int n_grpkj ) { if ( n_grpkj > 0 ) { cudaFree ( d_grpkj -> comp ); cudaFree ( d_grpkj -> elem ); } free ( d_grpkj ); } //////////////////////////////// MLIPKJ ////////////////////////////// __global__ static void mlipkj_initialize ( int *d_comp, double *d_elem, const int num_mlipkj, const int n_pkj, const int *d_postcomp_mlipkj ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < n_pkj ) { for( int j = 0; j < N_Syn_Per_MLIPKJ - 1; j++ ) { d_comp [ pre_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = id; // not use d_comp [ post_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = d_postcomp_mlipkj [ j ] + id * PKJ_COMP; d_elem [ mlipkj_gaba * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; d_elem [ mlipkj_weight * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = W_MLIPKJ / ( N_Syn_Per_MLIPKJ * 1.0); d_elem [ mlipkj_val * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; } int j = N_Syn_Per_MLIPKJ - 1; d_comp [ pre_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = id; // not use d_comp [ post_comp * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = d_postcomp_mlipkj [ j ] + id * PKJ_COMP; d_elem [ mlipkj_gaba * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; d_elem [ mlipkj_weight * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = W_MLIPKJ / ( 1.0 ); d_elem [ mlipkj_val * num_mlipkj + id * N_Syn_Per_MLIPKJ + j ] = 0.0; } } __host__ synapse_t *mlipkj_create ( const int n_pkj, const int n_gr ) { int num_mlipkj = n_pkj * N_Syn_Per_MLIPKJ; synapse_t *d_mlipkj = ( synapse_t * ) malloc ( sizeof ( synapse_t ) ); if ( n_gr == 0 || n_pkj == 0 ) { d_mlipkj -> n = 0; printf ( "# of mlipkj = 0\n" ); return d_mlipkj; } else { d_mlipkj -> n = num_mlipkj; printf ( "# of mlipkj = %d\n", num_mlipkj ); } cudaMalloc ( ( int ** ) & ( d_mlipkj -> comp ), syn_n_comp * num_mlipkj * sizeof ( int ) ); cudaMalloc ( ( double ** ) & ( d_mlipkj -> elem ), mlipkj_n_elem * num_mlipkj * sizeof ( double ) ); d_mlipkj -> f_out = fopen ( "MLI_RASTER.csv", "w" ); int 
*h_postcomp_mlipkj = ( int * ) malloc ( N_Syn_Per_MLIPKJ * sizeof ( int ) ); int *d_postcomp_mlipkj; cudaMalloc ( ( int ** ) & ( d_postcomp_mlipkj ), N_Syn_Per_MLIPKJ * sizeof ( int ) ); reset_array ( h_postcomp_mlipkj, N_Syn_Per_MLIPKJ ); for ( int i = 0; i < N_Syn_Per_MLIPKJ - 1; i++ ) { while ( 1 ) { int r = rand () % ( PKJ_COMP - 10 ); // pkj comps : 1590 ~ 1599 -> main dends or soma for ( int j = 0; j < i; j++ ) { if ( h_postcomp_mlipkj [ j ] == r ) { continue; } } h_postcomp_mlipkj [ i ] = r; break; } } h_postcomp_mlipkj [ N_Syn_Per_MLIPKJ - 1 ] = 1599; // Debug //printf ("Debug for mlipkj"); //for ( int i = 0; i < N_Syn_Per_MLIPKJ; i++ ) { printf ("%d, ", h_postcomp_mlipkj [ i ]); } //printf ("\n"); cudaMemcpy ( d_postcomp_mlipkj, h_postcomp_mlipkj, N_Syn_Per_MLIPKJ * sizeof ( int ), cudaMemcpyHostToDevice ); mlipkj_initialize <<< ( ( n_pkj ) + 127 ) / 128, 128 >>> ( d_mlipkj -> comp, d_mlipkj -> elem, num_mlipkj, n_pkj, d_postcomp_mlipkj ); // Set rand cudaMalloc ( ( void ** ) &( d_mlipkj -> cstate ), num_mlipkj * sizeof ( curandState ) ); setCurand <<< ( num_mlipkj + 127 ) / 128, 128 >>> ( rand (), d_mlipkj -> cstate, num_mlipkj ); free ( h_postcomp_mlipkj ); cudaFree ( d_postcomp_mlipkj ); return d_mlipkj; } __global__ void mlipkj_update ( int *mlipkj_comp, double *mlipkj_elem, const double t, const int num_mlipkj, curandState *S ) { int id = threadIdx.x + blockIdx.x * blockDim.x; if ( id < num_mlipkj ) { int firing_flag; double fr_mli = 30.0; //( S_Stimuli <= t && t < E_Stimuli + 200.0 )? fr_mli = 150.0 : fr_mli = 30.0; //( 0 <= t && t < 500.0 )? fr_mli = 0.0 : ( S_Stimuli + 50.0 <= t && t < E_Stimuli + 50.0 )? fr_mli = 30.0: //( E_Stimuli <= t && t < E_Stimuli + 50.0 )? fr_mli = 80.0: fr_mli = 30.0; //( S_Stimuli + 100.0 <= t && t < E_Stimuli )? fr_mli = 50.0: //( E_Stimuli <= t && t < E_Stimuli + 100.0 )? fr_mli = 150.0: double f = curand_uniform ( & ( S [ id ] ) ); ( fr_mli * 0.125 * 0.001 > f )? 
firing_flag = 1 : firing_flag = 0; // Decay :exp( - DT / tau ) = 0.98757780049 ( DT = 0.125 )//tau = 10.0 mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] = mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] * 0.98757780049 + firing_flag; mlipkj_elem [ mlipkj_val * num_mlipkj + id ] = mlipkj_elem [ mlipkj_weight * num_mlipkj + id ] * ( G_MLIPKJ_GABA * mlipkj_elem [ mlipkj_gaba * num_mlipkj + id ] ); } } __host__ void mlipkj_finalize ( synapse_t *d_mlipkj , const int n ) { if ( n > 0 ) { cudaFree ( d_mlipkj -> comp ); cudaFree ( d_mlipkj -> elem ); cudaFree ( d_mlipkj -> cstate ); fclose ( d_mlipkj -> f_out ); } free ( d_mlipkj ); } __host__ void mli_output_file ( synapse_t *d_mlipkj, const double t, neuron_t *p_pkj ) { FILE *f = d_mlipkj -> f_out; double *ret = ( double * ) malloc ( sizeof ( double ) * mlipkj_n_elem * d_mlipkj -> n ); cudaMemcpy ( ret, d_mlipkj -> elem, mlipkj_n_elem * d_mlipkj -> n * sizeof ( double ), cudaMemcpyDeviceToHost ); double val = 0.0; fprintf ( f, "%lf,", t ); for ( int j = 0; j < d_mlipkj -> n; j++ ) { val = G_MLIPKJ_GABA * ret [ mlipkj_gaba * d_mlipkj -> n + j ]; val *= ret [ mlipkj_weight * d_mlipkj -> n + j ] *1000000; fprintf ( f, "%lf,", val ); } fprintf ( f, "\n" ); free ( ret ); } ///////////////////////////////////////////////////////////////////////// __host__ void gr_synapse_update ( const double t, const double DT, synapse_t *d_mfgr, synapse_t *d_gogr, neuron_t *d_go, neuron_solve_t *p_gr_solve ) { //static int count = 0; if ( 0 == strncmp ( p_gr_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ mfgr_update <<< ( d_mfgr -> n + 127 ) / 128, 128 >>> ( d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) { gogr_update <<< ( d_gogr -> n + 127 ) / 128, 128 >>> ( d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_gr_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ mfgr_update <<< ( d_mfgr -> n + 127 ) / 128, 128 >>> ( d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) { gogr_update <<< ( d_gogr -> n + 127 ) / 128, 128 >>> ( d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_gr_solve -> type, "RKC", 3 ) ) { mfgr_update <<< ( d_mfgr -> n + 127 ) / 128, 128 >>> ( d_mfgr -> comp, d_mfgr -> elem, t, d_mfgr -> n, d_mfgr -> cstate ); if ( d_gogr -> n > 0 ) { gogr_update <<< ( d_gogr -> n + 127 ) / 128, 128 >>> ( d_gogr -> comp, d_gogr -> elem, d_gogr -> n, d_go ); } } } __host__ void go_synapse_update ( const double t, const double DT, synapse_t *d_grgo, neuron_t *d_gr, neuron_solve_t *p_go_solve ) { //static int count = 0; if ( 0 == strncmp ( p_go_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grgo -> n > 0 ) { grgo_update <<< ( d_grgo -> n + 127 ) / 128, 128 >>> ( d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_go_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grgo -> n > 0 ) { grgo_update <<< ( d_grgo -> n + 127 ) / 128, 128 >>> ( d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_go_solve -> type, "RKC", 3 ) ) { if ( d_grgo -> n > 0 ) { grgo_update <<< ( d_grgo -> n + 127 ) / 128, 128 >>> ( d_grgo -> comp, d_grgo -> elem, d_grgo -> n, d_gr ); } } } __host__ void pkj_synapse_update ( const double t, const double DT, synapse_t *d_grpkj, synapse_t *d_mlipkj, neuron_t *d_gr, neuron_solve_t 
*p_pkj_solve ) { //static int count = 0; if ( 0 == strncmp ( p_pkj_solve -> type, "BE", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grpkj -> n > 0 ) { grpkj_update <<< ( d_grpkj -> n + 127 ) / 128, 128 >>> ( d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_pkj_solve -> type, "CN", 2 ) ) { //if ( count % 5 == 0 ) //{ if ( d_grpkj -> n > 0 ) { grpkj_update <<< ( d_grpkj -> n + 127 ) / 128, 128 >>> ( d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } //if ( d_mlipkj -> n > 0 ) { mlipkj_update <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>> ( d_mlipkj -> comp, d_mlipkj -> elem, t, d_mlipkj -> n, d_mlipkj -> cstate ); } // count = 0; //} //count++; } else if ( 0 == strncmp ( p_pkj_solve -> type, "RKC", 3 ) ) { if ( d_grpkj -> n > 0 ) { grpkj_update <<< ( d_grpkj -> n + 127 ) / 128, 128 >>> ( d_grpkj -> comp, d_grpkj -> elem, d_grpkj -> n, d_gr ); } //if ( d_mlipkj -> n > 0 ) { mlipkj_update <<< ( d_mlipkj -> n + 127 ) / 128, 128 >>> ( d_mlipkj -> comp, d_mlipkj -> elem, t, d_mlipkj -> n, d_mlipkj -> cstate ); } } }
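/*
 * Illustrative stand-alone check (not part of the original sources). Every
 * synaptic conductance above is decayed by a pre-computed factor of the form
 * exp( -DT / tau ); the magic numbers quoted in the comments (0.90107510572,
 * 0.99759904077, 0.81193634615, 0.98757780049, ...) all follow from DT = 0.125 ms
 * and the stated time constants. The tau values below are taken from those
 * comments; the file name and everything else in this sketch are assumptions.
 */
// decay_check.cpp : compile separately, e.g. "g++ decay_check.cpp -o decay_check"
#include <cstdio>
#include <cmath>
int main ( void )
{
  const double DT = 0.125;                   /* ms, as in the kernel comments       */
  printf ( "%.11f\n", exp ( -DT / 1.2 ) );   /* mfgr AMPA, expected 0.90107510572   */
  printf ( "%.11f\n", exp ( -DT / 52.0 ) );  /* mfgr NMDA, expected 0.99759904077   */
  printf ( "%.11f\n", exp ( -DT / 0.6 ) );   /* grpkj AMPA, expected 0.81193634615  */
  printf ( "%.11f\n", exp ( -DT / 10.0 ) );  /* mlipkj GABA, expected 0.98757780049 */
  return 0;
}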
8222b9cb1ae1611fb68de30d6b9d041ad21816d7.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <cstdio> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <hip/hip_runtime.h> #include <iostream> #include "baseline.cu" #define NOUT_PER_THREADS 1 __global__ void Test(uint64_t *DStates, uint32_t *DOuts){ extern __shared__ uint64_t BStates[]; /// Read states to shared mem. BStates[threadIdx.x] = DStates[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); #pragma unroll for(int i=0;i<NOUT_PER_THREADS;i++){ DOuts[(blockIdx.x*blockDim.x + threadIdx.x)*NOUT_PER_THREADS+i] = pcg32_64(BStates[threadIdx.x],blockIdx.x*blockDim.x + threadIdx.x); } __syncthreads(); ///Save states back: DStates[blockIdx.x*blockDim.x + threadIdx.x] = BStates[threadIdx.x]; }; int main(int argc, char *argv[]){ unsigned int BlockSize_x = 256; unsigned int GridSize_x = 256; /// each thread will have one state. uint64_t* HStates = (uint64_t*)malloc(sizeof(uint64_t)*BlockSize_x*GridSize_x); uint64_t* DStates; uint32_t* HOuts = (uint32_t*)malloc(sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS); uint32_t* DOuts; /// Allocate device mem. if(hipMalloc((void**)&DStates, sizeof(uint64_t)*BlockSize_x*GridSize_x)){ fprintf(stderr,"ERROR, couldn't allocate Device Mem.%s","\n"); exit(1); } if(hipMalloc((void**)&DOuts, sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS)){ fprintf(stderr,"ERROR, couldn't allocate Device Mem.%s","\n"); exit(1); } ///Initialize: all the threads use the same seed; the streams are attached to unique tids. ///The maximum number of threads that can share the same seed is limited to 2^63 for 64-bit states. ///Each stream generates a unique RNG sequence with period 2^64 uint64_t seed = 99; for(unsigned int i=0;i<BlockSize_x*GridSize_x;i++) HStates[i] = seed; ///Move State -> Dev. hipMemcpy(DStates,HStates,sizeof(uint64_t)*BlockSize_x*GridSize_x,hipMemcpyHostToDevice); ///Launch Kernel: for(unsigned int i=0;i<10000;i++) hipLaunchKernelGGL(( Test), dim3(GridSize_x),dim3(BlockSize_x),sizeof(uint64_t)*BlockSize_x, 0, DStates,DOuts); printf("Done.%s","\n"); ///Get Result -> Loc. hipMemcpy(HOuts,DOuts,sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS,hipMemcpyDeviceToHost); free(HStates); free(HOuts); hipFree(DStates); hipFree(DOuts); }
8222b9cb1ae1611fb68de30d6b9d041ad21816d7.cu
#include <cstdlib> #include <cstdio> #include <unistd.h> #include <sys/stat.h> #include <sys/types.h> #include <cuda.h> #include <iostream> #include "baseline.cu" #define NOUT_PER_THREADS 1 __global__ void Test(uint64_t *DStates, uint32_t *DOuts){ extern __shared__ uint64_t BStates[]; /// Read states to shared mem. BStates[threadIdx.x] = DStates[blockIdx.x*blockDim.x + threadIdx.x]; __syncthreads(); #pragma unroll for(int i=0;i<NOUT_PER_THREADS;i++){ DOuts[(blockIdx.x*blockDim.x + threadIdx.x)*NOUT_PER_THREADS+i] = pcg32_64(BStates[threadIdx.x],blockIdx.x*blockDim.x + threadIdx.x); } __syncthreads(); ///Save states back: DStates[blockIdx.x*blockDim.x + threadIdx.x] = BStates[threadIdx.x]; }; int main(int argc, char *argv[]){ unsigned int BlockSize_x = 256; unsigned int GridSize_x = 256; /// each thread will have one state. uint64_t* HStates = (uint64_t*)malloc(sizeof(uint64_t)*BlockSize_x*GridSize_x); uint64_t* DStates; uint32_t* HOuts = (uint32_t*)malloc(sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS); uint32_t* DOuts; /// Allocate device mem. if(cudaMalloc((void**)&DStates, sizeof(uint64_t)*BlockSize_x*GridSize_x)){ fprintf(stderr,"ERROR, couldn't allocate Device Mem.%s","\n"); exit(1); } if(cudaMalloc((void**)&DOuts, sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS)){ fprintf(stderr,"ERROR, couldn't allocate Device Mem.%s","\n"); exit(1); } ///Initialize: all the threads use the same seed; the streams are attached to unique tids. ///The maximum number of threads that can share the same seed is limited to 2^63 for 64-bit states. ///Each stream generates a unique RNG sequence with period 2^64 uint64_t seed = 99; for(unsigned int i=0;i<BlockSize_x*GridSize_x;i++) HStates[i] = seed; ///Move State -> Dev. cudaMemcpy(DStates,HStates,sizeof(uint64_t)*BlockSize_x*GridSize_x,cudaMemcpyHostToDevice); ///Launch Kernel: for(unsigned int i=0;i<10000;i++) Test<<<GridSize_x,BlockSize_x,sizeof(uint64_t)*BlockSize_x>>>(DStates,DOuts); printf("Done.%s","\n"); ///Get Result -> Loc. cudaMemcpy(HOuts,DOuts,sizeof(uint32_t)*BlockSize_x*GridSize_x*NOUT_PER_THREADS,cudaMemcpyDeviceToHost); free(HStates); free(HOuts); cudaFree(DStates); cudaFree(DOuts); }
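/*
 * Clarifying note (added, not part of the original sources). In both variants
 * of the test above, the third launch parameter sizes the dynamic shared-memory
 * buffer BStates[]: sizeof(uint64_t) * BlockSize_x = 8 * 256 = 2048 bytes per
 * block, i.e. one 64-bit PCG state per thread. Each thread loads its state from
 * global memory once, draws NOUT_PER_THREADS outputs, and writes the state
 * back. The generator pcg32_64(state, stream_id) comes from the included
 * "baseline.cu", which is not shown here; judging only from the call site, it
 * appears to update the state in place and return a 32-bit value, with the
 * global thread index acting as the stream selector, but that reading of its
 * contract is an assumption.
 */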
e83cdb7947460317618baa99941c49d3bbec424f.hip
// !!! This is a file automatically generated by hipify!!! #include "nfautil.h" #include <hip/hip_runtime.h> /* * Visualize the NFA in stdout */ int visited[5000]; int count[5000]; int visited_index = 0; int nstate; State matchstate = { Match }; /* matching state */ List l1, l2; static int listid; void addstate(List*, State*); void step(List*, int, List*); /* Compute initial state list */ List* startlist(State *start, List *l) { l->n = 0; listid++; addstate(l, start); return l; } /* Check whether state list contains a match. */ int ismatch(List *l) { int i; for(i=0; i<l->n; i++) if(l->s[i]->c == Match) return 1; return 0; } /* Add s to l, following unlabeled arrows. */ void addstate(List *l, State *s) { // lastlist check is present to ensure that if // multiple states point to this state, then only // one instance of the state is added to the list if(s == NULL || s->lastlist == listid) return; s->lastlist = listid; if(s->c == Split){ /* follow unlabeled arrows */ addstate(l, s->out); addstate(l, s->out1); return; } l->s[l->n++] = s; } /* * Step the NFA from the states in clist * past the character c, * to create next NFA state set nlist. */ void step(List *clist, int c, List *nlist) { int i; State *s; listid++; nlist->n = 0; for(i=0; i<clist->n; i++){ s = clist->s[i]; if(s->c == c || s->c == Any) addstate(nlist, s->out); } } /* Run NFA to determine whether it matches s. */ int match(State *start, char *s) { int c; List *clist, *nlist, *t; clist = startlist(start, &l1); nlist = &l2; for(; *s; s++){ c = *s & 0xFF; step(clist, c, nlist); t = clist; clist = nlist; nlist = t; // swap clist, nlist // check for a match in the middle of the string if (ismatch(clist)) return 1; } return ismatch(clist); } /* Check for a string match at all possible start positions */ int anyMatch(State *start, char *s) { int isMatch = match(start, s); int index = 0; int len = strlen(s); while (!isMatch && index <= len) { isMatch = match(start, s + index); index ++; } return isMatch; } /* Allocate and initialize State */ State* state(int c, State *out, State *out1) { State *s; s = (State *) malloc(sizeof *s); s->id = ++nstate; s->lastlist = 0; s->c = c; s->out = out; s->out1 = out1; // device pointer of itself // serves no real purpose other than to help transfer the NFA over s->dev = NULL; s->free = STATE_INIT; return s; } /* Initialize Frag struct. */ Frag frag(State *start, Ptrlist *out) { Frag n = { start, out }; return n; } /* Create singleton list containing just outp. */ Ptrlist* list1(State **outp) { Ptrlist *l; l = (Ptrlist*)outp; l->next = NULL; return l; } /* Patch the list of states at out to point to start. */ void patch(Ptrlist *l, State *s) { Ptrlist *next; for(; l; l=next){ next = l->next; l->s = s; } } /* Join the two lists l1 and l2, returning the combination. */ Ptrlist* append(Ptrlist *l1, Ptrlist *l2) { Ptrlist *oldl1; oldl1 = l1; while(l1->next) l1 = l1->next; l1->next = l2; return oldl1; } /* * Convert postfix regular expression to NFA. * Return start state. */ State* post2nfa(char *postfix) { char *p; Frag stack[1000], *stackp, e1, e2, e; State *s; // fprintf(stderr, "postfix: %s\n", postfix); if(postfix == NULL) return NULL; #define push(s) *stackp++ = s #define pop() *--stackp stackp = stack; for(p=postfix; *p; p++){ switch(*p){ case ANY: /* any (.) 
*/ s = state(Any, NULL, NULL); push(frag(s, list1(&s->out))); break; default: s = state(*p, NULL, NULL); push(frag(s, list1(&s->out))); break; case CONCATENATE: /* catenate */ e2 = pop(); e1 = pop(); patch(e1.out, e2.start); push(frag(e1.start, e2.out)); break; case ALTERNATE: /* alternate (|)*/ e2 = pop(); e1 = pop(); s = state(Split, e1.start, e2.start); push(frag(s, append(e1.out, e2.out))); break; case QUESTION: /* zero or one (?)*/ e = pop(); s = state(Split, e.start, NULL); push(frag(s, append(e.out, list1(&s->out1)))); break; case STAR: /* zero or more (*)*/ e = pop(); s = state(Split, e.start, NULL); patch(e.out, s); push(frag(s, list1(&s->out1))); break; case PLUS: /* one or more (+)*/ e = pop(); s = state(Split, e.start, NULL); patch(e.out, s); push(frag(e.start, list1(&s->out1))); break; } } e = pop(); if(stackp != stack) return NULL; patch(e.out, &matchstate); return e.start; #undef pop #undef push } /* * Convert infix regexp re to postfix notation. * Insert ESC (or 0x1b) as explicit concatenation operator. * Cheesy parser, return static buffer. */ char* re2post(char *re) { int nalt, natom; static char buf[8000]; char *dst; struct { int nalt; int natom; } paren[100], *p; p = paren; dst = buf; nalt = 0; natom = 0; if(strlen(re) >= sizeof buf/2) return NULL; for(; *re; re++){ switch(*re){ case PAREN_OPEN: // ( if(natom > 1){ --natom; *dst++ = CONCATENATE; } if(p >= paren+100) return NULL; p->nalt = nalt; p->natom = natom; p++; nalt = 0; natom = 0; break; case ALTERNATE: // | if(natom == 0) return NULL; while(--natom > 0) *dst++ = CONCATENATE; nalt++; break; case PAREN_CLOSE: // ) if(p == paren) return NULL; if(natom == 0) return NULL; while(--natom > 0) *dst++ = CONCATENATE; for(; nalt > 0; nalt--) *dst++ = ALTERNATE; --p; nalt = p->nalt; natom = p->natom; natom++; break; case STAR: // * case PLUS: // + case QUESTION: // ? if(natom == 0) return NULL; *dst++ = *re; break; default: if(natom > 1){ --natom; *dst++ = CONCATENATE; } *dst++ = *re; natom++; break; } } if(p != paren) return NULL; while(--natom > 0) *dst++ = CONCATENATE; for(; nalt > 0; nalt--) *dst++ = ALTERNATE; *dst = 0; return buf; } void readFile (char *fileName, char ***lines, int *lineIndex) { FILE *fp = fopen(fileName, "r"); char *source = NULL; if (fp != NULL) { /* Go to the end of the file. */ if (fseek(fp, 0L, SEEK_END) == 0) { /* Get the size of the file. */ long bufsize = ftell(fp); if (bufsize == -1) { /* Error */ } /* Allocate our buffer to that size. */ source = (char *) malloc(sizeof(char) * (bufsize + 1)); /* Go back to the start of the file. */ if (fseek(fp, 0L, SEEK_SET) == 0) { /* Error */ } /* Read the entire file into memory. */ size_t newLen = fread(source, sizeof(char), bufsize, fp); if (newLen == 0) { fputs("Error reading file", stderr); } else { source[newLen] = '\0'; /* Just to be safe. */ } } fclose(fp); } *lines = (char **) malloc (sizeof(char *) * 1); **lines = source; *lineIndex = 1; } void usage(const char* progname) { printf("Usage: %s [options] [pattern] \n", progname); printf("Program Options:\n"); printf(" -v Visualize the NFA then exit\n"); printf(" -p View postfix expression then exit\n"); printf(" -s View simplified expression then exit\n"); printf(" -t Print timing data\n"); printf(" -f <FILE> --file Input file to be matched\n"); printf(" -r <FILE> --regex Input file with regexs\n"); printf(" -? 
This message\n"); printf("[pattern] required only if -r or --regex is not used\n"); } void parseCmdLine(int argc, char **argv, int *visualize, int *postfix, int *time, int *simplified, char **fileName, char **regexFile) { if (argc < 3) { usage(argv[0]); exit(EXIT_SUCCESS); } int opt; static struct option long_options[] = { {"help", no_argument, 0, '?'}, {"postfix", no_argument, 0, 'p'}, {"simplified", no_argument, 0, 's'}, {"visualize", no_argument, 0, 'v'}, {"file", required_argument, 0, 'f'}, {"regex", required_argument, 0, 'r'}, {"time", no_argument, 0, 't'}, {0 ,0, 0, 0} }; *visualize = 0; *postfix = 0; *time = 0; *simplified = 0; while ((opt = getopt_long_only(argc, argv, "tvpsf:r:?", long_options, NULL)) != EOF) { switch (opt) { case 'v': *visualize = 1; break; case 'p': *postfix = 1; break; case 'f': *fileName = optarg; break; case 'r': *regexFile = optarg; break; case 't': *time = 1; break; case 's': *simplified = 1; break; default: usage(argv[0]); exit(EXIT_SUCCESS); } } } int hasSeen(State * start, int * index) { int i; for (i = 0; i < 5000; i++) { if (visited[i] == start->id) { *index = i; return 0; } } return 1; } void visualize_nfa_help(State * start) { int index; if (start == NULL) { return; } if (hasSeen(start, &index) == 0) { if (count[index] > 0) { return; } } count[start->id]++; visited[start->id] = start->id; char data[10]; if (start->c == Match) { strcpy(data, "Match"); } else if (start->c == Split) { strcpy(data, "Split"); } else if (start->c == Any) { strcpy(data, "Any"); } else { sprintf(data, "Char %c", start->c); } int outId, outId1; outId = (start->out == NULL) ? -1 : start->out->id; outId1 = (start->out1 == NULL) ? -1 : start->out1->id; printf("{ \"id\": \"%d\", \"data\":\"%s\", \"out\":\"%d\", \"out1\":\"%d\" \n},", start->id, data, outId, outId1); visualize_nfa_help(start->out); visualize_nfa_help(start->out1); } void visualize_nfa(State * start) { memset(visited, 0, 5000*(sizeof(int))); memset(count, 0, 5000*(sizeof(int))); printf("["); visualize_nfa_help(start); printf("]\n"); } double gettime() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + tv.tv_usec / 1000000.0; }
e83cdb7947460317618baa99941c49d3bbec424f.cu
#include "nfautil.h" #include <cuda.h> /* * Visualize the NFA in stdout */ int visited[5000]; int count[5000]; int visited_index = 0; int nstate; State matchstate = { Match }; /* matching state */ List l1, l2; static int listid; void addstate(List*, State*); void step(List*, int, List*); /* Compute initial state list */ List* startlist(State *start, List *l) { l->n = 0; listid++; addstate(l, start); return l; } /* Check whether state list contains a match. */ int ismatch(List *l) { int i; for(i=0; i<l->n; i++) if(l->s[i]->c == Match) return 1; return 0; } /* Add s to l, following unlabeled arrows. */ void addstate(List *l, State *s) { // lastlist check is present to ensure that if // multiple states point to this state, then only // one instance of the state is added to the list if(s == NULL || s->lastlist == listid) return; s->lastlist = listid; if(s->c == Split){ /* follow unlabeled arrows */ addstate(l, s->out); addstate(l, s->out1); return; } l->s[l->n++] = s; } /* * Step the NFA from the states in clist * past the character c, * to create next NFA state set nlist. */ void step(List *clist, int c, List *nlist) { int i; State *s; listid++; nlist->n = 0; for(i=0; i<clist->n; i++){ s = clist->s[i]; if(s->c == c || s->c == Any) addstate(nlist, s->out); } } /* Run NFA to determine whether it matches s. */ int match(State *start, char *s) { int c; List *clist, *nlist, *t; clist = startlist(start, &l1); nlist = &l2; for(; *s; s++){ c = *s & 0xFF; step(clist, c, nlist); t = clist; clist = nlist; nlist = t; // swap clist, nlist // check for a match in the middle of the string if (ismatch(clist)) return 1; } return ismatch(clist); } /* Check for a string match at all possible start positions */ int anyMatch(State *start, char *s) { int isMatch = match(start, s); int index = 0; int len = strlen(s); while (!isMatch && index <= len) { isMatch = match(start, s + index); index ++; } return isMatch; } /* Allocate and initialize State */ State* state(int c, State *out, State *out1) { State *s; s = (State *) malloc(sizeof *s); s->id = ++nstate; s->lastlist = 0; s->c = c; s->out = out; s->out1 = out1; // device pointer of itself // serves no real purpose other than to help transfer the NFA over s->dev = NULL; s->free = STATE_INIT; return s; } /* Initialize Frag struct. */ Frag frag(State *start, Ptrlist *out) { Frag n = { start, out }; return n; } /* Create singleton list containing just outp. */ Ptrlist* list1(State **outp) { Ptrlist *l; l = (Ptrlist*)outp; l->next = NULL; return l; } /* Patch the list of states at out to point to start. */ void patch(Ptrlist *l, State *s) { Ptrlist *next; for(; l; l=next){ next = l->next; l->s = s; } } /* Join the two lists l1 and l2, returning the combination. */ Ptrlist* append(Ptrlist *l1, Ptrlist *l2) { Ptrlist *oldl1; oldl1 = l1; while(l1->next) l1 = l1->next; l1->next = l2; return oldl1; } /* * Convert postfix regular expression to NFA. * Return start state. */ State* post2nfa(char *postfix) { char *p; Frag stack[1000], *stackp, e1, e2, e; State *s; // fprintf(stderr, "postfix: %s\n", postfix); if(postfix == NULL) return NULL; #define push(s) *stackp++ = s #define pop() *--stackp stackp = stack; for(p=postfix; *p; p++){ switch(*p){ case ANY: /* any (.) 
*/ s = state(Any, NULL, NULL); push(frag(s, list1(&s->out))); break; default: s = state(*p, NULL, NULL); push(frag(s, list1(&s->out))); break; case CONCATENATE: /* catenate */ e2 = pop(); e1 = pop(); patch(e1.out, e2.start); push(frag(e1.start, e2.out)); break; case ALTERNATE: /* alternate (|)*/ e2 = pop(); e1 = pop(); s = state(Split, e1.start, e2.start); push(frag(s, append(e1.out, e2.out))); break; case QUESTION: /* zero or one (?)*/ e = pop(); s = state(Split, e.start, NULL); push(frag(s, append(e.out, list1(&s->out1)))); break; case STAR: /* zero or more (*)*/ e = pop(); s = state(Split, e.start, NULL); patch(e.out, s); push(frag(s, list1(&s->out1))); break; case PLUS: /* one or more (+)*/ e = pop(); s = state(Split, e.start, NULL); patch(e.out, s); push(frag(e.start, list1(&s->out1))); break; } } e = pop(); if(stackp != stack) return NULL; patch(e.out, &matchstate); return e.start; #undef pop #undef push } /* * Convert infix regexp re to postfix notation. * Insert ESC (or 0x1b) as explicit concatenation operator. * Cheesy parser, return static buffer. */ char* re2post(char *re) { int nalt, natom; static char buf[8000]; char *dst; struct { int nalt; int natom; } paren[100], *p; p = paren; dst = buf; nalt = 0; natom = 0; if(strlen(re) >= sizeof buf/2) return NULL; for(; *re; re++){ switch(*re){ case PAREN_OPEN: // ( if(natom > 1){ --natom; *dst++ = CONCATENATE; } if(p >= paren+100) return NULL; p->nalt = nalt; p->natom = natom; p++; nalt = 0; natom = 0; break; case ALTERNATE: // | if(natom == 0) return NULL; while(--natom > 0) *dst++ = CONCATENATE; nalt++; break; case PAREN_CLOSE: // ) if(p == paren) return NULL; if(natom == 0) return NULL; while(--natom > 0) *dst++ = CONCATENATE; for(; nalt > 0; nalt--) *dst++ = ALTERNATE; --p; nalt = p->nalt; natom = p->natom; natom++; break; case STAR: // * case PLUS: // + case QUESTION: // ? if(natom == 0) return NULL; *dst++ = *re; break; default: if(natom > 1){ --natom; *dst++ = CONCATENATE; } *dst++ = *re; natom++; break; } } if(p != paren) return NULL; while(--natom > 0) *dst++ = CONCATENATE; for(; nalt > 0; nalt--) *dst++ = ALTERNATE; *dst = 0; return buf; } void readFile (char *fileName, char ***lines, int *lineIndex) { FILE *fp = fopen(fileName, "r"); char *source = NULL; if (fp != NULL) { /* Go to the end of the file. */ if (fseek(fp, 0L, SEEK_END) == 0) { /* Get the size of the file. */ long bufsize = ftell(fp); if (bufsize == -1) { /* Error */ } /* Allocate our buffer to that size. */ source = (char *) malloc(sizeof(char) * (bufsize + 1)); /* Go back to the start of the file. */ if (fseek(fp, 0L, SEEK_SET) == 0) { /* Error */ } /* Read the entire file into memory. */ size_t newLen = fread(source, sizeof(char), bufsize, fp); if (newLen == 0) { fputs("Error reading file", stderr); } else { source[newLen] = '\0'; /* Just to be safe. */ } } fclose(fp); } *lines = (char **) malloc (sizeof(char *) * 1); **lines = source; *lineIndex = 1; } void usage(const char* progname) { printf("Usage: %s [options] [pattern] \n", progname); printf("Program Options:\n"); printf(" -v Visualize the NFA then exit\n"); printf(" -p View postfix expression then exit\n"); printf(" -s View simplified expression then exit\n"); printf(" -t Print timing data\n"); printf(" -f <FILE> --file Input file to be matched\n"); printf(" -r <FILE> --regex Input file with regexs\n"); printf(" -? 
This message\n"); printf("[pattern] required only if -r or --regex is not used\n"); } void parseCmdLine(int argc, char **argv, int *visualize, int *postfix, int *time, int *simplified, char **fileName, char **regexFile) { if (argc < 3) { usage(argv[0]); exit(EXIT_SUCCESS); } int opt; static struct option long_options[] = { {"help", no_argument, 0, '?'}, {"postfix", no_argument, 0, 'p'}, {"simplified", no_argument, 0, 's'}, {"visualize", no_argument, 0, 'v'}, {"file", required_argument, 0, 'f'}, {"regex", required_argument, 0, 'r'}, {"time", no_argument, 0, 't'}, {0 ,0, 0, 0} }; *visualize = 0; *postfix = 0; *time = 0; *simplified = 0; while ((opt = getopt_long_only(argc, argv, "tvpsf:r:?", long_options, NULL)) != EOF) { switch (opt) { case 'v': *visualize = 1; break; case 'p': *postfix = 1; break; case 'f': *fileName = optarg; break; case 'r': *regexFile = optarg; break; case 't': *time = 1; break; case 's': *simplified = 1; break; default: usage(argv[0]); exit(EXIT_SUCCESS); } } } int hasSeen(State * start, int * index) { int i; for (i = 0; i < 5000; i++) { if (visited[i] == start->id) { *index = i; return 0; } } return 1; } void visualize_nfa_help(State * start) { int index; if (start == NULL) { return; } if (hasSeen(start, &index) == 0) { if (count[index] > 0) { return; } } count[start->id]++; visited[start->id] = start->id; char data[10]; if (start->c == Match) { strcpy(data, "Match"); } else if (start->c == Split) { strcpy(data, "Split"); } else if (start->c == Any) { strcpy(data, "Any"); } else { sprintf(data, "Char %c", start->c); } int outId, outId1; outId = (start->out == NULL) ? -1 : start->out->id; outId1 = (start->out1 == NULL) ? -1 : start->out1->id; printf("{ \"id\": \"%d\", \"data\":\"%s\", \"out\":\"%d\", \"out1\":\"%d\" \n},", start->id, data, outId, outId1); visualize_nfa_help(start->out); visualize_nfa_help(start->out1); } void visualize_nfa(State * start) { memset(visited, 0, 5000*(sizeof(int))); memset(count, 0, 5000*(sizeof(int))); printf("["); visualize_nfa_help(start); printf("]\n"); } double gettime() { struct timeval tv; gettimeofday(&tv, NULL); return tv.tv_sec + tv.tv_usec / 1000000.0; }
53ea35a0286950bac118e92d2f22b6e78c51b7d7.hip
// !!! This is a file automatically generated by hipify!!! #include <iomanip> #include <omp.h> #include <types.h> #include <my_errors.h> #include <kernel.h> #include <functions.h> #include <utilis.h> using namespace std; HostError CudaInit(unsigned int *M, int NGPU, int rank, string gpu_name, const bool setdev, vector<unsigned int> &dev, string path){ hipDeviceProp_t *properties; int count; DeviceSafeCall(hipGetDeviceCount(&count)); properties = new hipDeviceProp_t [count]; ofstream hlog; unsigned int *to_use = new unsigned int [NGPU]; int k = 0; string temp; char* output_name; if((unsigned)NGPU != dev.size() && setdev) return HNoNumber; if(rank == 0) temp = path + "HiGPUslog.dat"; output_name = to_char(temp); hlog.open(output_name, ios::app); if(count < NGPU || count <= 0) return HNoGpus; for(int i = 0; i < count; i++){ DeviceSafeCall(hipGetDeviceProperties(&properties[i], i)); if(rank == 0) hlog<<" Available : "<<properties[i].name<<" as device : "<<i<<endl; } if(rank == 0) hlog<<"============================================="<<endl; if(!setdev){ for(int i = 0; i < count; i++){ if(gpu_name.length()>0 && to_string(properties[i].name) != gpu_name) { continue; } else { to_use[k] = i; k++; if(k >= NGPU) {break;} } } }else{ for(unsigned int i = 0; i < dev.size(); i++){ if(to_string(properties[dev[i]].name) != gpu_name) continue; else{ to_use[k] = dev[i]; k++; } } } if(k<NGPU) return HNoGpus; if(rank==0) { for(int i = 0; i < NGPU; i++) hlog<<" Using : "<<properties[to_use[i]].name<<" (device "<<to_use[i]<<")"<<endl; } if(rank == 0){ if(properties[to_use[0]].major == 2) *M = properties[to_use[0]].multiProcessorCount * 1536; else if(properties[to_use[0]].major == 3) *M = properties[to_use[0]].multiProcessorCount * 2048; else if(properties[to_use[0]].major == 1){ if(properties[to_use[0]].minor == 3) *M = properties[to_use[0]].multiProcessorCount * 1024; else return HNoDouble; } cout<<" Maximum number of parallel threads on the gpu : "<<*M<<endl; } MPISafeCall(MPI_Bcast(M, 1, MPI_INT, 0, MPI_COMM_WORLD)); dev.resize(NGPU); for(int i = 0; i < NGPU; i++) dev[i] = to_use[i]; delete [] properties; delete [] to_use; if(rank == 0) hlog.close(); return HNoError; }
53ea35a0286950bac118e92d2f22b6e78c51b7d7.cu
#include <iomanip> #include <omp.h> #include <types.h> #include <my_errors.h> #include <kernel.h> #include <functions.h> #include <utilis.h> using namespace std; HostError CudaInit(unsigned int *M, int NGPU, int rank, string gpu_name, const bool setdev, vector<unsigned int> &dev, string path){ cudaDeviceProp *properties; int count; DeviceSafeCall(cudaGetDeviceCount(&count)); properties = new cudaDeviceProp [count]; ofstream hlog; unsigned int *to_use = new unsigned int [NGPU]; int k = 0; string temp; char* output_name; if((unsigned)NGPU != dev.size() && setdev) return HNoNumber; if(rank == 0) temp = path + "HiGPUslog.dat"; output_name = to_char(temp); hlog.open(output_name, ios::app); if(count < NGPU || count <= 0) return HNoGpus; for(int i = 0; i < count; i++){ DeviceSafeCall(cudaGetDeviceProperties(&properties[i], i)); if(rank == 0) hlog<<" Available : "<<properties[i].name<<" as device : "<<i<<endl; } if(rank == 0) hlog<<"============================================="<<endl; if(!setdev){ for(int i = 0; i < count; i++){ if(gpu_name.length()>0 && to_string(properties[i].name) != gpu_name) { continue; } else { to_use[k] = i; k++; if(k >= NGPU) {break;} } } }else{ for(unsigned int i = 0; i < dev.size(); i++){ if(to_string(properties[dev[i]].name) != gpu_name) continue; else{ to_use[k] = dev[i]; k++; } } } if(k<NGPU) return HNoGpus; if(rank==0) { for(int i = 0; i < NGPU; i++) hlog<<" Using : "<<properties[to_use[i]].name<<" (device "<<to_use[i]<<")"<<endl; } if(rank == 0){ if(properties[to_use[0]].major == 2) *M = properties[to_use[0]].multiProcessorCount * 1536; else if(properties[to_use[0]].major == 3) *M = properties[to_use[0]].multiProcessorCount * 2048; else if(properties[to_use[0]].major == 1){ if(properties[to_use[0]].minor == 3) *M = properties[to_use[0]].multiProcessorCount * 1024; else return HNoDouble; } cout<<" Maximum number of parallel threads on the gpu : "<<*M<<endl; } MPISafeCall(MPI_Bcast(M, 1, MPI_INT, 0, MPI_COMM_WORLD)); dev.resize(NGPU); for(int i = 0; i < NGPU; i++) dev[i] = to_use[i]; delete [] properties; delete [] to_use; if(rank == 0) hlog.close(); return HNoError; }
17bcab268008bae4c3cd9f7748d47383aa3a525b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdint> #include <vector> #include <iostream> #include "../src/communicator.h" #include "../src/error.cuh" #define COUNT 50'000'000LL __global__ void set_data(uint64_t *start_addr, uint64_t size, uint64_t start_val) { const int ithread = threadIdx.x + blockDim.x * blockIdx.x; const int stride = blockDim.x * gridDim.x; for (uint64_t ielement = ithread; ielement < size; ielement += stride) { start_addr[ielement] = (start_val + ielement); } } __global__ void test_correctness(uint64_t *start_addr, uint64_t size, uint64_t start_val) { const int ithread = threadIdx.x + blockDim.x * blockIdx.x; const int stride = blockDim.x * gridDim.x; for (uint64_t ielement = ithread; ielement < size; ielement += stride) { assert(start_addr[ielement] == (start_val + ielement)); } } int main(int argc, char *argv[]) { UCXBufferCommunicator communicator; communicator.initialize(argc, argv); int mpi_rank = communicator.mpi_rank; int mpi_size = communicator.mpi_size; communicator.setup_cache(2 * mpi_size, 20'000'000LL); communicator.warmup_cache(); /* Send and recv data */ uint64_t *send_buf {nullptr}; std::vector<uint64_t *> recv_buf(mpi_size, nullptr); std::vector<comm_handle_t> send_reqs(mpi_size, nullptr); std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr); RMM_CALL(RMM_ALLOC(&send_buf, COUNT * sizeof(uint64_t), 0)); int grid_size {-1}; int block_size {-1}; CUDA_RT_CALL(hipOccupancyMaxPotentialBlockSize(&grid_size, &block_size, set_data)); hipLaunchKernelGGL(( set_data), dim3(grid_size), dim3(block_size), 0, 0, send_buf, COUNT, COUNT * mpi_rank); for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { send_reqs[irank] = communicator.send((void *)send_buf, COUNT, sizeof(uint64_t), irank, 32); } } int64_t count_received; for (int irank = mpi_size - 1; irank >= 0; irank --) { if (irank != mpi_rank) { recv_reqs[irank] = communicator.recv( (void **)&recv_buf[irank], &count_received, sizeof(uint64_t), irank, 32 ); } } communicator.waitall(send_reqs); communicator.waitall(recv_reqs); assert(count_received == COUNT); /* Test the correctness */ for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { hipLaunchKernelGGL(( test_correctness), dim3(grid_size), dim3(block_size), 0, 0, recv_buf[irank], COUNT, COUNT * irank); } } /* Cleanup */ RMM_CALL(RMM_FREE(send_buf, 0)); for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { RMM_CALL(RMM_FREE(recv_buf[irank], 0)); } } communicator.finalize(); if (mpi_rank == 0) { std::cerr << "Test case \"buffer_communicator\" passes successfully.\n"; } return 0; }
17bcab268008bae4c3cd9f7748d47383aa3a525b.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdint> #include <vector> #include <iostream> #include "../src/communicator.h" #include "../src/error.cuh" #define COUNT 50'000'000LL __global__ void set_data(uint64_t *start_addr, uint64_t size, uint64_t start_val) { const int ithread = threadIdx.x + blockDim.x * blockIdx.x; const int stride = blockDim.x * gridDim.x; for (uint64_t ielement = ithread; ielement < size; ielement += stride) { start_addr[ielement] = (start_val + ielement); } } __global__ void test_correctness(uint64_t *start_addr, uint64_t size, uint64_t start_val) { const int ithread = threadIdx.x + blockDim.x * blockIdx.x; const int stride = blockDim.x * gridDim.x; for (uint64_t ielement = ithread; ielement < size; ielement += stride) { assert(start_addr[ielement] == (start_val + ielement)); } } int main(int argc, char *argv[]) { UCXBufferCommunicator communicator; communicator.initialize(argc, argv); int mpi_rank = communicator.mpi_rank; int mpi_size = communicator.mpi_size; communicator.setup_cache(2 * mpi_size, 20'000'000LL); communicator.warmup_cache(); /* Send and recv data */ uint64_t *send_buf {nullptr}; std::vector<uint64_t *> recv_buf(mpi_size, nullptr); std::vector<comm_handle_t> send_reqs(mpi_size, nullptr); std::vector<comm_handle_t> recv_reqs(mpi_size, nullptr); RMM_CALL(RMM_ALLOC(&send_buf, COUNT * sizeof(uint64_t), 0)); int grid_size {-1}; int block_size {-1}; CUDA_RT_CALL(cudaOccupancyMaxPotentialBlockSize(&grid_size, &block_size, set_data)); set_data<<<grid_size, block_size>>>(send_buf, COUNT, COUNT * mpi_rank); for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { send_reqs[irank] = communicator.send((void *)send_buf, COUNT, sizeof(uint64_t), irank, 32); } } int64_t count_received; for (int irank = mpi_size - 1; irank >= 0; irank --) { if (irank != mpi_rank) { recv_reqs[irank] = communicator.recv( (void **)&recv_buf[irank], &count_received, sizeof(uint64_t), irank, 32 ); } } communicator.waitall(send_reqs); communicator.waitall(recv_reqs); assert(count_received == COUNT); /* Test the correctness */ for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { test_correctness<<<grid_size, block_size>>>(recv_buf[irank], COUNT, COUNT * irank); } } /* Cleanup */ RMM_CALL(RMM_FREE(send_buf, 0)); for (int irank = 0; irank < mpi_size; irank ++) { if (irank != mpi_rank) { RMM_CALL(RMM_FREE(recv_buf[irank], 0)); } } communicator.finalize(); if (mpi_rank == 0) { std::cerr << "Test case \"buffer_communicator\" passes successfully.\n"; } return 0; }
197d8f3da03981b67ee1a85fe2b0b6b76f44a706.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <stdio.h> #include <math.h> #include <complex> #include <cmath> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <hipfft.h> #include <omp.h> //#include <mpi.h> #define DATA_OFFSET_OFFSET 0x000A #define WIDTH_OFFSET 0x0012 #define HEIGHT_OFFSET 0x0016 #define BITS_PER_PIXEL_OFFSET 0x001C #define HEADER_SIZE 14 #define INFO_HEADER_SIZE 40 #define NO_COMPRESION 0 #define MAX_NUMBER_OF_COLORS 0 #define ALL_COLORS_REQUIRED 0 using namespace std; const std::complex<double> i1(0, 1); typedef unsigned int int32; typedef short int16; typedef unsigned char byte; __global__ void multiplyElementwise(hipfftDoubleComplex* f0, hipfftDoubleComplex* f1, int size) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < size) { double a, b, c, d; a = f0[i].x; b = f0[i].y; c = f1[i].x; d = f1[i].y; f0[i].x = a*c - b*d; f0[i].y = a*d + b*c; } } // void u_in_in_big(double* u_in, hipfftDoubleComplex* data, int NX, int NY, int multi); // void h_z(double lam, double z, double k, double sampling, int NX, int NY, hipfftDoubleComplex* h_z_cutab); // void Q_roll(hipfftDoubleComplex* u_in_fft, hipfftDoubleComplex* data, int NX, int NY); // void amplitude_print(hipfftDoubleComplex* u_in_fft, int NX, int NY, FILE* fp); // int FFT_Z2Z(hipfftDoubleComplex* dData, int NX, int NY); // int IFFT_Z2Z(hipfftDoubleComplex* dData, int NX, int NY); // ----------------------------------------------------------------------------------------------------------------------------------------------- // // --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- // // ----------------------------------------------------------------------------------------------------------------------------------------------- // void u_in_in_big(double* u_in, hipfftDoubleComplex* data, int NX, int NY, int multi) { for(int ii=0; ii < NY ; ii++) { for(int jj=0; jj < NX ; jj++) { data[ii*NX+jj].x = 0; data[ii*NX+jj].y = 0; } } for(int ii=0; ii < (int)NY/multi ; ii++) { for(int jj=0; jj < (int)NX/multi ; jj++) { data[(ii*NX+jj)+(NX*NY*(multi-1)/(multi*2)+NX*(multi-1)/(multi*2))].x = u_in[ii*(NX/multi)+jj]; } } } void hz(double lam, double z, double k, double sampling, int NX, int NY, hipfftDoubleComplex* hz_cutab) { std::complex<double>* hz_tab; hz_tab = (std::complex<double> *) malloc ( sizeof(std::complex<double>)* NX * NY); double fi = 0; double teta = 0; double lam_z = 0; fi = k * z; teta = k / (2.0 * z); lam_z = lam * z; double quad = 0.0; double teta1 = 0.0; for(int iy=0; iy < NY; iy++) { //printf("\n"); for(int ix=0; ix < NX ; ix++) { quad = pow(((double)ix-((double)NX/2.0))*sampling, 2) + pow(((double)iy-((double)NY/2.0))*sampling, 2); teta1 = teta * quad; //hz_tab[iy*NX+ix] = ::exp(i*fi) * ::exp(i*teta1)/(i*lam_z); hz_tab[iy*NX+ix] = ::exp(i1*fi) * ::exp(i1*teta1)/(i1*lam_z); hz_cutab[iy*NX+ix].x = hz_tab[iy*NX+ix].real(); hz_cutab[iy*NX+ix].y = hz_tab[iy*NX+ix].imag(); //printf("%.2f\t", hz_cutab[iy*NX+ix].x); } } free(hz_tab); } void Qroll(hipfftDoubleComplex* u_in_fft, hipfftDoubleComplex* data, int NX, int NY) { for(int iy=0; iy<(NY/4); iy++) //Petla na przepisanie tablicy koncowej { for(int jx=0; jx<(NX/4); jx++) { u_in_fft[(NX/2*NY/4+NY/4)+(jx+iy*NX/2)] = data[iy*(NX)+jx]; // Q1 -> Q4 u_in_fft[(jx+NX/4)+(iy*NX/2)] = data[(iy*(NX)+jx)+(NX*NY*3/4)]; // Q3 -> Q2 u_in_fft[(jx)+(iy*NX/2)] = data[((iy*NX)+jx)+(NX*3/4+NX*NY*3/4)]; // 
Q4 -> Q1 u_in_fft[(jx)+(iy*NX/2)+NX*NY/2/4] = data[((iy*NX)+jx)+(NX*3/4)]; // Q2 -> Q3 } } } void amplitude_print(hipfftDoubleComplex* u_in_fft, int NX, int NY, FILE* fp) { // --- Przeliczanie Amplitudy --- // for(int ii=0; ii<(NX*NY/4); ii++) { u_in_fft[ii].x = sqrt(pow(u_in_fft[ii].x, 2) + pow(u_in_fft[ii].y, 2)); } double mini_data = u_in_fft[0].x; for(int ii=0; ii<(NX*NY/4); ii++) { if (u_in_fft[ii].x < mini_data){ mini_data = u_in_fft[ii].x; } } double max_data = u_in_fft[0].x; mini_data = -mini_data; for(int ii=0; ii<(NX*NY/4); ii++) { u_in_fft[ii].x = u_in_fft[ii].x + mini_data; if (u_in_fft[ii].x > max_data) { max_data = u_in_fft[ii].x; } } for(int ii=0; ii<(NX*NY/4); ii++) { if (ii%(NX/2) == 0){fprintf (fp,"\n");} u_in_fft[ii].x = u_in_fft[ii].x / max_data * 255.0; fprintf (fp,"%.0f\t", u_in_fft[ii].x); } } int FFT_Z2Z(hipfftDoubleComplex* dData, int NX, int NY) { // Create a 2D FFT plan. int err = 0; hipfftHandle plan1; if (hipfftPlan2d(&plan1, NX, NY, HIPFFT_Z2Z) != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to create plan\n"); err = -1; } if (hipfftExecZ2Z(plan1, dData, dData, HIPFFT_FORWARD) != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); err = -1; } if (hipDeviceSynchronize() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); err = -1; } hipfftDestroy(plan1); return err; } int IFFT_Z2Z(hipfftDoubleComplex* dData, int NX, int NY) { // Create a 2D FFT plan. int err = 0; hipfftHandle plan1; if (hipfftPlan2d(&plan1, NX, NY, HIPFFT_Z2Z) != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to create plan\n"); err = -1; } if (hipfftExecZ2Z(plan1, dData, dData, HIPFFT_BACKWARD) != HIPFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); err = -1; } if (hipDeviceSynchronize() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); err = -1; } hipfftDestroy(plan1); return err; } void BMP_Save_Amplitude(hipfftDoubleComplex* u_out, int NX, int NY, FILE* fp) { // --- SAVE BMP FILE --- // //uint8_t colorIndex = 0; //uint16_t color = 0; unsigned int headers[13]; int extrabytes; int paddedsize; int x = 0; int y = 0; int n = 0; int red = 0; int green = 0; int blue = 0; int WIDTH = NX/2; int HEIGHT = NY/2; extrabytes = 4 - ((WIDTH * 3) % 4); // How many bytes of padding to add to each // horizontal line - the size of which must // be a multiple of 4 bytes. if (extrabytes == 4) extrabytes = 0; paddedsize = ((WIDTH * 3) + extrabytes) * HEIGHT; // Headers... // Note that the "BM" identifier in bytes 0 and 1 is NOT included in these "headers". headers[0] = paddedsize + 54; // bfSize (whole file size) headers[1] = 0; // bfReserved (both) headers[2] = 54; // bfOffbits headers[3] = 40; // biSize headers[4] = WIDTH; // biWidth headers[5] = HEIGHT; // biHeight // Would have biPlanes and biBitCount in position 6, but they're shorts. // It's easier to write them out separately (see below) than pretend // they're a single int, especially with endian issues... headers[7] = 0; // biCompression headers[8] = paddedsize; // biSizeImage headers[9] = 0; // biXPelsPerMeter headers[10] = 0; // biYPelsPerMeter headers[11] = 0; // biClrUsed headers[12] = 0; // biClrImportant // outfile = fopen(filename, "wb"); //File file = fopen("test.bmp", "wb"); if (!fp) { cout << "There was an error opening the file for writing"; //return; }else{ // Headers begin... // When printing ints and shorts, we write out 1 character at a time to avoid endian issues. 
fprintf(fp, "BM"); for (n = 0; n <= 5; n++) { fprintf(fp, "%c", headers[n] & 0x000000FF); fprintf(fp, "%c", (headers[n] & 0x0000FF00) >> 8); fprintf(fp, "%c", (headers[n] & 0x00FF0000) >> 16); fprintf(fp, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24); } // These next 4 characters are for the biPlanes and biBitCount fields. fprintf(fp, "%c", 1); fprintf(fp, "%c", 0); fprintf(fp, "%c", 24); fprintf(fp, "%c", 0); for (n = 7; n <= 12; n++) { fprintf(fp, "%c", headers[n] & 0x000000FF); fprintf(fp, "%c", (headers[n] & 0x0000FF00) >> 8); fprintf(fp, "%c", (headers[n] & 0x00FF0000) >> 16); fprintf(fp, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24); } // --- Przeliczanie Amplitudy --- // for(int ii=0; ii<(NX*NY/4); ii++) { u_out[ii].x = sqrt(pow(u_out[ii].x, 2) + pow(u_out[ii].y, 2)); } double mini_data = u_out[0].x; for(int ii=0; ii<(NX*NY/4); ii++) { if (u_out[ii].x < mini_data){ mini_data = u_out[ii].x; } } double max_data = u_out[0].x; mini_data = -mini_data; for(int ii=0; ii<(NX*NY/4); ii++) { u_out[ii].x = u_out[ii].x + mini_data; if (u_out[ii].x > max_data) { max_data = u_out[ii].x; } } for(int ii=0; ii<(NX*NY/4); ii++) { //if (ii%(NX/2) == 0){fprintf (fp,"\n");} u_out[ii].x = u_out[ii].x / max_data * 255.0; //fprintf (fp,"%.0f\t", u_in_fft[ii].x); } // Headers done, now write the data... for (y = HEIGHT - 1; y >= 0; y--) // BMP image format is written from bottom to top... { for (x = 0; x <= WIDTH - 1; x++) { red = u_out[x+(NX/2*y)].x; if (red > 255) red = 255; if (red < 0) red = 0; green = red; blue = red; // --- RGB range from 0 to 255 --- // // if (red > 255) red = 255; if (red < 0) red = 0; // if (green > 255) green = 255; if (green < 0) green = 0; // if (blue > 255) blue = 255; if (blue < 0) blue = 0; // Also, it's written in (b,g,r) format... fprintf (fp, "%c", blue); fprintf (fp, "%c", green); fprintf (fp, "%c", red); } if (extrabytes) // See above - BMP lines must be of lengths divisible by 4. { for (n = 1; n <= extrabytes; n++) { fprintf (fp, "%c", 0); } } } //fclose(fp); cout << "Writing to BMP complete!" 
<< endl; } // --- END SAVING BMP FILE --- // } void ReadImage(const char *fileName,byte **pixels, int32 *width, int32 *height, int32 *bytesPerPixel) { FILE *imageFile = fopen(fileName, "rb"); int32 dataOffset; fseek(imageFile, DATA_OFFSET_OFFSET, SEEK_SET); fread(&dataOffset, 4, 1, imageFile); fseek(imageFile, WIDTH_OFFSET, SEEK_SET); fread(width, 4, 1, imageFile); fseek(imageFile, HEIGHT_OFFSET, SEEK_SET); fread(height, 4, 1, imageFile); int16 bitsPerPixel; fseek(imageFile, BITS_PER_PIXEL_OFFSET, SEEK_SET); fread(&bitsPerPixel, 2, 1, imageFile); *bytesPerPixel = ((int32)bitsPerPixel) / 8; int paddedRowSize = (int)(4 * ceil((float)(*width) / 4.0f))*(*bytesPerPixel); int unpaddedRowSize = (*width)*(*bytesPerPixel); int totalSize = unpaddedRowSize*(*height); cout << "BMP FILE: " << fileName << " | Width: " << *width << " | Height: " << *height << " | Total Size: " << totalSize << " | BitsPerPixel: " << bitsPerPixel << endl; *pixels = (byte*)malloc(totalSize); int i = 0; byte *currentRowPointer = *pixels+((*height-1)*unpaddedRowSize); for (i = 0; i < *height; i++) { fseek(imageFile, dataOffset+(i*paddedRowSize), SEEK_SET); fread(currentRowPointer, 1, unpaddedRowSize, imageFile); currentRowPointer -= unpaddedRowSize; } fclose(imageFile); } /* * start program: ./cudaOpenMP Test_NTO_1024.bmp 2 500.0 633.0 10.0 * start program: ./cudaOpenMP plik_z_przezroczem.BMP Multiply_tmp Odleglosc_Z_mm Dl_fali_Lambda_nm Sampling_micro */ // --- Main Part --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- // int main(int argc, char *argv[]) { cout << "Welcome to CUDA test" << endl; //int COL = atoi(argv[2]); //int ROW = atoi(argv[3]); //double* u_in; //u_in = (double *) malloc ( sizeof(double)* COL * ROW); cout << "WELCOME" << " | " << argv[0] << " | " << argv[1] << " | " << argv[2] << " | " << argv[3] << " | " << atoi(argv[4]) << " | " << atoi(argv[5]) << endl; printf("\n---------------------------\n"); // --- PC Specs finder --- // int num_gpus = 0; // number of CUDA GPUs hipGetDeviceCount(&num_gpus); if (num_gpus < 1) { printf("no CUDA capable devices were detected\n"); return 1; } printf("Number of host CPUs:\t%d\n", omp_get_num_procs()); printf("Number of CUDA devices:\t%d\n", num_gpus); for (int i = 0; i < num_gpus; i++) { hipDeviceProp_t dprop; hipGetDeviceProperties(&dprop, i); printf(" %d: %s\n", i, dprop.name); } printf("---------------------------\n\n"); // --- Import BMP image --- // byte *pixels; int32 width; int32 height; int32 bytesPerPixel; ReadImage(argv[1], &pixels, &width, &height,&bytesPerPixel); double* Image_Red = (double *) malloc ( sizeof(double)* width * height); double* Image_Green = (double *) malloc ( sizeof(double)* width * height); double* Image_Blue = (double *) malloc ( sizeof(double)* width * height); int iterator = 0; for(int i=0; i<(height*width)*3; i+=3) { Image_Red[iterator] = pixels[i]; Image_Green[iterator] = pixels[i+1]; Image_Blue[iterator] = pixels[i+2]; iterator++; } free(pixels); int32 COL = width; int32 ROW = height; // --- Import TXT file with image --- // // ifstream inputFile; // inputFile.open(argv[1]); // if (inputFile) // { // cout << "Import file: " << argv[1] << endl; // int i,j = 0; // for (i = 0; i < ROW; i++) // { // for (j = 0; j < COL; j++) // { // inputFile >> u_in[i*ROW+j]; // } // } // cout << "Import file - complete" << endl; // } else { // cout << "Error opening the file.\n"; // } // inputFile.close(); int multi = atoi(argv[2]); int NX = COL*multi; int NY = 
ROW*multi; // --- Przeliczenie hz --- // double sampling = atof(argv[5]) * pow(10.0, (-6)); // Sampling = 10 micro double lam = atof(argv[4]) * (pow(10.0,(-9))); // Lambda = 633 nm double k = 2.0 * M_PI / lam; // Wektor falowy k double z_in = atof(argv[3])*(pow(10.0,(-3))); // Odleglosc propagacji = 0,5 m double z_out = 1000.0*(pow(10.0,(-3))); // Koniec odległości propagacji = 1 m double z_delta = 50.0*(pow(10.0,(-3))); // Skok odległości = 0,05 m //double z = z_in+(ip*z_delta); // Odległość Z dla każdego wątku MPI double z = z_in; printf("\nVariables | k = %.1f | Lambda = %.1f nm | Z = %.4f m | Sampling = %.3f micro | Tablica tymczasowa = x%i |\n\n", k, lam*(pow(10.0,(9))), z, sampling*pow(10.0,(6)), multi); // --- FFT tablicy wejsciowej --- // hipfftDoubleComplex* data; data = (hipfftDoubleComplex *) malloc ( sizeof(hipfftDoubleComplex)* NX * NY); hipfftDoubleComplex* dData; hipMalloc((void **) &dData, sizeof(hipfftDoubleComplex)* NX * NY); if (hipGetLastError() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: Allocate Cuda Memory\n"); return -1; } size_t pitch1; // --- Wpisanie tablicy wejsciowej do wiekszej tablicy tymczasowej --- // u_in_in_big(Image_Green, data, NX, NY, multi); // Poki co 'Image_Green' jako tablica wejsciowa // --- Liczenie U_in = FFT{u_in} --- // hipMallocPitch(&dData, &pitch1, sizeof(hipfftDoubleComplex)*NX, NY); hipMemcpy2D(dData,pitch1,data,sizeof(hipfftDoubleComplex)*NX,sizeof(hipfftDoubleComplex)*NX,NX,hipMemcpyHostToDevice); if (hipGetLastError() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: Calculate FFT{u_in}\n"); return -1; } if (FFT_Z2Z(dData, NX, NY) == -1) { return -1; } hipMemcpy(data, dData, sizeof(hipfftDoubleComplex)*NX*NY, hipMemcpyDeviceToHost); // --- Liczenie hz --- // hipfftDoubleComplex* hz_tab; hz_tab = (hipfftDoubleComplex *) malloc ( sizeof(hipfftDoubleComplex)* NX * NY); hz(lam, z, k, sampling, NX, NY, hz_tab); // --- Liczenie hz = FFT{hz_tab} --- // hipfftDoubleComplex* hz; hipMalloc((void **) &hz, sizeof(hipfftDoubleComplex)* NX * NY); size_t pitch2; hipMallocPitch(&hz, &pitch2, sizeof(hipfftDoubleComplex)*NX, NY); hipMemcpy2D(hz,pitch2,hz_tab,sizeof(hipfftDoubleComplex)*NX,sizeof(hipfftDoubleComplex)*NX,NX,hipMemcpyHostToDevice); if(hipGetLastError() != hipSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: FFT{hz_tab}\n"); return -1; } if (FFT_Z2Z(hz, NX, NY) == -1) { return -1; } // --- Do the actual multiplication --- // hipLaunchKernelGGL(( multiplyElementwise), dim3(NX*NY), dim3(1), 0, 0, dData, hz, NX*NY); // --- Liczenie u_out = iFFT{dData = U_OUT} --- // if(IFFT_Z2Z(dData, NX, NY) == -1) { return -1; } hipMemcpy(data, dData, sizeof(hipfftDoubleComplex)*NX*NY, hipMemcpyDeviceToHost); // --- ROLL cwiartek, zeby wszystko sie zgadzalo na koniec --- // hipfftDoubleComplex* u_out; u_out = (hipfftDoubleComplex *) malloc (sizeof(hipfftDoubleComplex)* NX/2 * NY/2); Qroll(u_out, data, NX, NY); // --- Zapis do pliku BMP --- // char filename[128]; snprintf ( filename, 128, "z_%.3lf-m_lam_%.1lf-nm_samp_%.1lf-micro.BMP", z, lam*(pow(10.0,(9))), sampling*(pow(10.0,(6)))); FILE* fp = fopen(filename,"wb"); // --- Przeliczanie Amplitudy i Zapis do pliku --- // //amplitude_print(u_out, NX, NY, fp); BMP_Save_Amplitude(u_out, NX, NY, fp); fclose(fp); // --- Zwalnianie pamieci --- // hipFree(u_out); hipFree(data); hipFree(dData); hipFree(hz_tab); hipFree(hz); free(Image_Red); free(Image_Green); free(Image_Blue); return 0; }
197d8f3da03981b67ee1a85fe2b0b6b76f44a706.cu
#include <iostream> #include <fstream> #include <stdio.h> #include <math.h> #include <complex> #include <cmath> #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <cufft.h> #include <omp.h> //#include <mpi.h> #define DATA_OFFSET_OFFSET 0x000A #define WIDTH_OFFSET 0x0012 #define HEIGHT_OFFSET 0x0016 #define BITS_PER_PIXEL_OFFSET 0x001C #define HEADER_SIZE 14 #define INFO_HEADER_SIZE 40 #define NO_COMPRESION 0 #define MAX_NUMBER_OF_COLORS 0 #define ALL_COLORS_REQUIRED 0 using namespace std; const std::complex<double> i1(0, 1); typedef unsigned int int32; typedef short int16; typedef unsigned char byte; __global__ void multiplyElementwise(cufftDoubleComplex* f0, cufftDoubleComplex* f1, int size) { const int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < size) { double a, b, c, d; a = f0[i].x; b = f0[i].y; c = f1[i].x; d = f1[i].y; f0[i].x = a*c - b*d; f0[i].y = a*d + b*c; } } // void u_in_in_big(double* u_in, cufftDoubleComplex* data, int NX, int NY, int multi); // void h_z(double lam, double z, double k, double sampling, int NX, int NY, cufftDoubleComplex* h_z_cutab); // void Q_roll(cufftDoubleComplex* u_in_fft, cufftDoubleComplex* data, int NX, int NY); // void amplitude_print(cufftDoubleComplex* u_in_fft, int NX, int NY, FILE* fp); // int FFT_Z2Z(cufftDoubleComplex* dData, int NX, int NY); // int IFFT_Z2Z(cufftDoubleComplex* dData, int NX, int NY); // ----------------------------------------------------------------------------------------------------------------------------------------------- // // --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- Functions --- // // ----------------------------------------------------------------------------------------------------------------------------------------------- // void u_in_in_big(double* u_in, cufftDoubleComplex* data, int NX, int NY, int multi) { for(int ii=0; ii < NY ; ii++) { for(int jj=0; jj < NX ; jj++) { data[ii*NX+jj].x = 0; data[ii*NX+jj].y = 0; } } for(int ii=0; ii < (int)NY/multi ; ii++) { for(int jj=0; jj < (int)NX/multi ; jj++) { data[(ii*NX+jj)+(NX*NY*(multi-1)/(multi*2)+NX*(multi-1)/(multi*2))].x = u_in[ii*(NX/multi)+jj]; } } } void hz(double lam, double z, double k, double sampling, int NX, int NY, cufftDoubleComplex* hz_cutab) { std::complex<double>* hz_tab; hz_tab = (std::complex<double> *) malloc ( sizeof(std::complex<double>)* NX * NY); double fi = 0; double teta = 0; double lam_z = 0; fi = k * z; teta = k / (2.0 * z); lam_z = lam * z; double quad = 0.0; double teta1 = 0.0; for(int iy=0; iy < NY; iy++) { //printf("\n"); for(int ix=0; ix < NX ; ix++) { quad = pow(((double)ix-((double)NX/2.0))*sampling, 2) + pow(((double)iy-((double)NY/2.0))*sampling, 2); teta1 = teta * quad; //hz_tab[iy*NX+ix] = std::exp(i*fi) * std::exp(i*teta1)/(i*lam_z); hz_tab[iy*NX+ix] = std::exp(i1*fi) * std::exp(i1*teta1)/(i1*lam_z); hz_cutab[iy*NX+ix].x = hz_tab[iy*NX+ix].real(); hz_cutab[iy*NX+ix].y = hz_tab[iy*NX+ix].imag(); //printf("%.2f\t", hz_cutab[iy*NX+ix].x); } } free(hz_tab); } void Qroll(cufftDoubleComplex* u_in_fft, cufftDoubleComplex* data, int NX, int NY) { for(int iy=0; iy<(NY/4); iy++) //Petla na przepisanie tablicy koncowej { for(int jx=0; jx<(NX/4); jx++) { u_in_fft[(NX/2*NY/4+NY/4)+(jx+iy*NX/2)] = data[iy*(NX)+jx]; // Q1 -> Q4 u_in_fft[(jx+NX/4)+(iy*NX/2)] = data[(iy*(NX)+jx)+(NX*NY*3/4)]; // Q3 -> Q2 u_in_fft[(jx)+(iy*NX/2)] = data[((iy*NX)+jx)+(NX*3/4+NX*NY*3/4)]; // Q4 -> Q1 u_in_fft[(jx)+(iy*NX/2)+NX*NY/2/4] = 
data[((iy*NX)+jx)+(NX*3/4)]; // Q2 -> Q3 } } } void amplitude_print(cufftDoubleComplex* u_in_fft, int NX, int NY, FILE* fp) { // --- Przeliczanie Amplitudy --- // for(int ii=0; ii<(NX*NY/4); ii++) { u_in_fft[ii].x = sqrt(pow(u_in_fft[ii].x, 2) + pow(u_in_fft[ii].y, 2)); } double mini_data = u_in_fft[0].x; for(int ii=0; ii<(NX*NY/4); ii++) { if (u_in_fft[ii].x < mini_data){ mini_data = u_in_fft[ii].x; } } double max_data = u_in_fft[0].x; mini_data = -mini_data; for(int ii=0; ii<(NX*NY/4); ii++) { u_in_fft[ii].x = u_in_fft[ii].x + mini_data; if (u_in_fft[ii].x > max_data) { max_data = u_in_fft[ii].x; } } for(int ii=0; ii<(NX*NY/4); ii++) { if (ii%(NX/2) == 0){fprintf (fp,"\n");} u_in_fft[ii].x = u_in_fft[ii].x / max_data * 255.0; fprintf (fp,"%.0f\t", u_in_fft[ii].x); } } int FFT_Z2Z(cufftDoubleComplex* dData, int NX, int NY) { // Create a 2D FFT plan. int err = 0; cufftHandle plan1; if (cufftPlan2d(&plan1, NX, NY, CUFFT_Z2Z) != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to create plan\n"); err = -1; } if (cufftExecZ2Z(plan1, dData, dData, CUFFT_FORWARD) != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); err = -1; } if (cudaDeviceSynchronize() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); err = -1; } cufftDestroy(plan1); return err; } int IFFT_Z2Z(cufftDoubleComplex* dData, int NX, int NY) { // Create a 2D FFT plan. int err = 0; cufftHandle plan1; if (cufftPlan2d(&plan1, NX, NY, CUFFT_Z2Z) != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to create plan\n"); err = -1; } if (cufftExecZ2Z(plan1, dData, dData, CUFFT_INVERSE) != CUFFT_SUCCESS){ fprintf(stderr, "CUFFT Error: Unable to execute plan\n"); err = -1; } if (cudaDeviceSynchronize() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to synchronize\n"); err = -1; } cufftDestroy(plan1); return err; } void BMP_Save_Amplitude(cufftDoubleComplex* u_out, int NX, int NY, FILE* fp) { // --- SAVE BMP FILE --- // //uint8_t colorIndex = 0; //uint16_t color = 0; unsigned int headers[13]; int extrabytes; int paddedsize; int x = 0; int y = 0; int n = 0; int red = 0; int green = 0; int blue = 0; int WIDTH = NX/2; int HEIGHT = NY/2; extrabytes = 4 - ((WIDTH * 3) % 4); // How many bytes of padding to add to each // horizontal line - the size of which must // be a multiple of 4 bytes. if (extrabytes == 4) extrabytes = 0; paddedsize = ((WIDTH * 3) + extrabytes) * HEIGHT; // Headers... // Note that the "BM" identifier in bytes 0 and 1 is NOT included in these "headers". headers[0] = paddedsize + 54; // bfSize (whole file size) headers[1] = 0; // bfReserved (both) headers[2] = 54; // bfOffbits headers[3] = 40; // biSize headers[4] = WIDTH; // biWidth headers[5] = HEIGHT; // biHeight // Would have biPlanes and biBitCount in position 6, but they're shorts. // It's easier to write them out separately (see below) than pretend // they're a single int, especially with endian issues... headers[7] = 0; // biCompression headers[8] = paddedsize; // biSizeImage headers[9] = 0; // biXPelsPerMeter headers[10] = 0; // biYPelsPerMeter headers[11] = 0; // biClrUsed headers[12] = 0; // biClrImportant // outfile = fopen(filename, "wb"); //File file = fopen("test.bmp", "wb"); if (!fp) { cout << "There was an error opening the file for writing"; //return; }else{ // Headers begin... // When printing ints and shorts, we write out 1 character at a time to avoid endian issues. 
fprintf(fp, "BM"); for (n = 0; n <= 5; n++) { fprintf(fp, "%c", headers[n] & 0x000000FF); fprintf(fp, "%c", (headers[n] & 0x0000FF00) >> 8); fprintf(fp, "%c", (headers[n] & 0x00FF0000) >> 16); fprintf(fp, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24); } // These next 4 characters are for the biPlanes and biBitCount fields. fprintf(fp, "%c", 1); fprintf(fp, "%c", 0); fprintf(fp, "%c", 24); fprintf(fp, "%c", 0); for (n = 7; n <= 12; n++) { fprintf(fp, "%c", headers[n] & 0x000000FF); fprintf(fp, "%c", (headers[n] & 0x0000FF00) >> 8); fprintf(fp, "%c", (headers[n] & 0x00FF0000) >> 16); fprintf(fp, "%c", (headers[n] & (unsigned int) 0xFF000000) >> 24); } // --- Przeliczanie Amplitudy --- // for(int ii=0; ii<(NX*NY/4); ii++) { u_out[ii].x = sqrt(pow(u_out[ii].x, 2) + pow(u_out[ii].y, 2)); } double mini_data = u_out[0].x; for(int ii=0; ii<(NX*NY/4); ii++) { if (u_out[ii].x < mini_data){ mini_data = u_out[ii].x; } } double max_data = u_out[0].x; mini_data = -mini_data; for(int ii=0; ii<(NX*NY/4); ii++) { u_out[ii].x = u_out[ii].x + mini_data; if (u_out[ii].x > max_data) { max_data = u_out[ii].x; } } for(int ii=0; ii<(NX*NY/4); ii++) { //if (ii%(NX/2) == 0){fprintf (fp,"\n");} u_out[ii].x = u_out[ii].x / max_data * 255.0; //fprintf (fp,"%.0f\t", u_in_fft[ii].x); } // Headers done, now write the data... for (y = HEIGHT - 1; y >= 0; y--) // BMP image format is written from bottom to top... { for (x = 0; x <= WIDTH - 1; x++) { red = u_out[x+(NX/2*y)].x; if (red > 255) red = 255; if (red < 0) red = 0; green = red; blue = red; // --- RGB range from 0 to 255 --- // // if (red > 255) red = 255; if (red < 0) red = 0; // if (green > 255) green = 255; if (green < 0) green = 0; // if (blue > 255) blue = 255; if (blue < 0) blue = 0; // Also, it's written in (b,g,r) format... fprintf (fp, "%c", blue); fprintf (fp, "%c", green); fprintf (fp, "%c", red); } if (extrabytes) // See above - BMP lines must be of lengths divisible by 4. { for (n = 1; n <= extrabytes; n++) { fprintf (fp, "%c", 0); } } } //fclose(fp); cout << "Writing to BMP complete!" 
<< endl; } // --- END SAVING BMP FILE --- // } void ReadImage(const char *fileName,byte **pixels, int32 *width, int32 *height, int32 *bytesPerPixel) { FILE *imageFile = fopen(fileName, "rb"); int32 dataOffset; fseek(imageFile, DATA_OFFSET_OFFSET, SEEK_SET); fread(&dataOffset, 4, 1, imageFile); fseek(imageFile, WIDTH_OFFSET, SEEK_SET); fread(width, 4, 1, imageFile); fseek(imageFile, HEIGHT_OFFSET, SEEK_SET); fread(height, 4, 1, imageFile); int16 bitsPerPixel; fseek(imageFile, BITS_PER_PIXEL_OFFSET, SEEK_SET); fread(&bitsPerPixel, 2, 1, imageFile); *bytesPerPixel = ((int32)bitsPerPixel) / 8; int paddedRowSize = (int)(4 * ceil((float)(*width) / 4.0f))*(*bytesPerPixel); int unpaddedRowSize = (*width)*(*bytesPerPixel); int totalSize = unpaddedRowSize*(*height); cout << "BMP FILE: " << fileName << " | Width: " << *width << " | Height: " << *height << " | Total Size: " << totalSize << " | BitsPerPixel: " << bitsPerPixel << endl; *pixels = (byte*)malloc(totalSize); int i = 0; byte *currentRowPointer = *pixels+((*height-1)*unpaddedRowSize); for (i = 0; i < *height; i++) { fseek(imageFile, dataOffset+(i*paddedRowSize), SEEK_SET); fread(currentRowPointer, 1, unpaddedRowSize, imageFile); currentRowPointer -= unpaddedRowSize; } fclose(imageFile); } /* * start program: ./cudaOpenMP Test_NTO_1024.bmp 2 500.0 633.0 10.0 * start program: ./cudaOpenMP plik_z_przezroczem.BMP Multiply_tmp Odleglosc_Z_mm Dl_fali_Lambda_nm Sampling_micro */ // --- Main Part --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- MAIN --- // int main(int argc, char *argv[]) { cout << "Welcome to CUDA test" << endl; //int COL = atoi(argv[2]); //int ROW = atoi(argv[3]); //double* u_in; //u_in = (double *) malloc ( sizeof(double)* COL * ROW); cout << "WELCOME" << " | " << argv[0] << " | " << argv[1] << " | " << argv[2] << " | " << argv[3] << " | " << atoi(argv[4]) << " | " << atoi(argv[5]) << endl; printf("\n---------------------------\n"); // --- PC Specs finder --- // int num_gpus = 0; // number of CUDA GPUs cudaGetDeviceCount(&num_gpus); if (num_gpus < 1) { printf("no CUDA capable devices were detected\n"); return 1; } printf("Number of host CPUs:\t%d\n", omp_get_num_procs()); printf("Number of CUDA devices:\t%d\n", num_gpus); for (int i = 0; i < num_gpus; i++) { cudaDeviceProp dprop; cudaGetDeviceProperties(&dprop, i); printf(" %d: %s\n", i, dprop.name); } printf("---------------------------\n\n"); // --- Import BMP image --- // byte *pixels; int32 width; int32 height; int32 bytesPerPixel; ReadImage(argv[1], &pixels, &width, &height,&bytesPerPixel); double* Image_Red = (double *) malloc ( sizeof(double)* width * height); double* Image_Green = (double *) malloc ( sizeof(double)* width * height); double* Image_Blue = (double *) malloc ( sizeof(double)* width * height); int iterator = 0; for(int i=0; i<(height*width)*3; i+=3) { Image_Red[iterator] = pixels[i]; Image_Green[iterator] = pixels[i+1]; Image_Blue[iterator] = pixels[i+2]; iterator++; } free(pixels); int32 COL = width; int32 ROW = height; // --- Import TXT file with image --- // // ifstream inputFile; // inputFile.open(argv[1]); // if (inputFile) // { // cout << "Import file: " << argv[1] << endl; // int i,j = 0; // for (i = 0; i < ROW; i++) // { // for (j = 0; j < COL; j++) // { // inputFile >> u_in[i*ROW+j]; // } // } // cout << "Import file - complete" << endl; // } else { // cout << "Error opening the file.\n"; // } // inputFile.close(); int multi = atoi(argv[2]); int NX = COL*multi; int NY = 
ROW*multi; // --- Przeliczenie hz --- // double sampling = atof(argv[5]) * pow(10.0, (-6)); // Sampling = 10 micro double lam = atof(argv[4]) * (pow(10.0,(-9))); // Lambda = 633 nm double k = 2.0 * M_PI / lam; // Wektor falowy k double z_in = atof(argv[3])*(pow(10.0,(-3))); // Odleglosc propagacji = 0,5 m double z_out = 1000.0*(pow(10.0,(-3))); // Koniec odległości propagacji = 1 m double z_delta = 50.0*(pow(10.0,(-3))); // Skok odległości = 0,05 m //double z = z_in+(ip*z_delta); // Odległość Z dla każdego wątku MPI double z = z_in; printf("\nVariables | k = %.1f | Lambda = %.1f nm | Z = %.4f m | Sampling = %.3f micro | Tablica tymczasowa = x%i |\n\n", k, lam*(pow(10.0,(9))), z, sampling*pow(10.0,(6)), multi); // --- FFT tablicy wejsciowej --- // cufftDoubleComplex* data; data = (cufftDoubleComplex *) malloc ( sizeof(cufftDoubleComplex)* NX * NY); cufftDoubleComplex* dData; cudaMalloc((void **) &dData, sizeof(cufftDoubleComplex)* NX * NY); if (cudaGetLastError() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: Allocate Cuda Memory\n"); return -1; } size_t pitch1; // --- Wpisanie tablicy wejsciowej do wiekszej tablicy tymczasowej --- // u_in_in_big(Image_Green, data, NX, NY, multi); // Poki co 'Image_Green' jako tablica wejsciowa // --- Liczenie U_in = FFT{u_in} --- // cudaMallocPitch(&dData, &pitch1, sizeof(cufftDoubleComplex)*NX, NY); cudaMemcpy2D(dData,pitch1,data,sizeof(cufftDoubleComplex)*NX,sizeof(cufftDoubleComplex)*NX,NX,cudaMemcpyHostToDevice); if (cudaGetLastError() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: Calculate FFT{u_in}\n"); return -1; } if (FFT_Z2Z(dData, NX, NY) == -1) { return -1; } cudaMemcpy(data, dData, sizeof(cufftDoubleComplex)*NX*NY, cudaMemcpyDeviceToHost); // --- Liczenie hz --- // cufftDoubleComplex* hz_tab; hz_tab = (cufftDoubleComplex *) malloc ( sizeof(cufftDoubleComplex)* NX * NY); hz(lam, z, k, sampling, NX, NY, hz_tab); // --- Liczenie hz = FFT{hz_tab} --- // cufftDoubleComplex* hz; cudaMalloc((void **) &hz, sizeof(cufftDoubleComplex)* NX * NY); size_t pitch2; cudaMallocPitch(&hz, &pitch2, sizeof(cufftDoubleComplex)*NX, NY); cudaMemcpy2D(hz,pitch2,hz_tab,sizeof(cufftDoubleComplex)*NX,sizeof(cufftDoubleComplex)*NX,NX,cudaMemcpyHostToDevice); if(cudaGetLastError() != cudaSuccess){ fprintf(stderr, "Cuda error: Failed to allocate: FFT{hz_tab}\n"); return -1; } if (FFT_Z2Z(hz, NX, NY) == -1) { return -1; } // --- Do the actual multiplication --- // multiplyElementwise<<<NX*NY, 1>>>(dData, hz, NX*NY); // --- Liczenie u_out = iFFT{dData = U_OUT} --- // if(IFFT_Z2Z(dData, NX, NY) == -1) { return -1; } cudaMemcpy(data, dData, sizeof(cufftDoubleComplex)*NX*NY, cudaMemcpyDeviceToHost); // --- ROLL cwiartek, zeby wszystko sie zgadzalo na koniec --- // cufftDoubleComplex* u_out; u_out = (cufftDoubleComplex *) malloc (sizeof(cufftDoubleComplex)* NX/2 * NY/2); Qroll(u_out, data, NX, NY); // --- Zapis do pliku BMP --- // char filename[128]; snprintf ( filename, 128, "z_%.3lf-m_lam_%.1lf-nm_samp_%.1lf-micro.BMP", z, lam*(pow(10.0,(9))), sampling*(pow(10.0,(6)))); FILE* fp = fopen(filename,"wb"); // --- Przeliczanie Amplitudy i Zapis do pliku --- // //amplitude_print(u_out, NX, NY, fp); BMP_Save_Amplitude(u_out, NX, NY, fp); fclose(fp); // --- Zwalnianie pamieci --- // cudaFree(u_out); cudaFree(data); cudaFree(dData); cudaFree(hz_tab); cudaFree(hz); free(Image_Red); free(Image_Green); free(Image_Blue); return 0; }
6c5102a9cf306ebd806378d6a3487e1f0970d68f.hip
// !!! This is a file automatically generated by hipify!!! /** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_expstep_device.h" namespace RPU { #define UPDATE_ONCE \ { \ T z = 2.0 * w / b_diff * a + b; \ T y = 1.0 - A * __expf(gamma * z); \ if (y > 0) { \ if (noise_std_dw > 0) { \ T stoch_value = hiprand_normal(&local_state); \ stoch_value *= noise_std_dw; \ w += y * (stoch_value + 1.0) * dw; \ } else { \ w += y * dw; \ } \ w = (w > wmax) ? wmax : w; \ w = (w < wmin) ? wmin : w; \ } \ } template <typename T> struct UpdateFunctorExpStep { __device__ __forceinline__ void operator()( T &apparent_weight, uint32_t n, uint32_t negative, const float4 par_4, const float2 par_2, T &persistent_weight, const T *global_pars, T noise_std_dw, hiprandState_t &local_state) { // par_4 order (min_bound, scale_down, max_bound, scale_up ) // global_pars see below T uw_std = global_pars[6]; T wmax = par_4.z; //[2]; T wmin = par_4.x; //[0]; T b_diff = (wmax - wmin); T &w = uw_std > 0 ? persistent_weight : apparent_weight; if (b_diff > 0) { // only do something when bounds make sense T A = negative ? global_pars[1] : global_pars[0]; // 1: up, 0: down T gamma = negative ? global_pars[3] : (-global_pars[2]); // 3: up, 2 down T a = global_pars[4]; T b = global_pars[5]; T dw = (negative > 0) ? (par_4.w) : (-par_4.y); // [3], [1] // n is larger 0 in any case if (n == 1) { UPDATE_ONCE; } else { for (int i_updates = 0; i_updates < n; i_updates++) { UPDATE_ONCE; } } // add update write noise onto apparent weight if (uw_std > 0) { T stoch_value = hiprand_normal(&local_state); apparent_weight = persistent_weight + uw_std * stoch_value; } } } }; #undef UPDATE_ONCE RPUCUDA_DEVICE_ADD_FUNCTOR_UPDATE_KERNELS(ExpStep, UpdateFunctorExpStep<T>, 7); template class ExpStepRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class ExpStepRPUDeviceCuda<double>; #endif } // namespace RPU
6c5102a9cf306ebd806378d6a3487e1f0970d68f.cu
/** * (C) Copyright 2020, 2021 IBM. All Rights Reserved. * * This code is licensed under the Apache License, Version 2.0. You may * obtain a copy of this license in the LICENSE.txt file in the root directory * of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. * * Any modifications or derivative works of this code must retain this * copyright notice, and modified files need to carry a notice indicating * that they have been altered from the originals. */ #include "pwu_kernel_parameter.h" #include "rpu_pulsed_meta_parameter.h" #include "rpucuda_expstep_device.h" namespace RPU { #define UPDATE_ONCE \ { \ T z = 2.0 * w / b_diff * a + b; \ T y = 1.0 - A * __expf(gamma * z); \ if (y > 0) { \ if (noise_std_dw > 0) { \ T stoch_value = curand_normal(&local_state); \ stoch_value *= noise_std_dw; \ w += y * (stoch_value + 1.0) * dw; \ } else { \ w += y * dw; \ } \ w = (w > wmax) ? wmax : w; \ w = (w < wmin) ? wmin : w; \ } \ } template <typename T> struct UpdateFunctorExpStep { __device__ __forceinline__ void operator()( T &apparent_weight, uint32_t n, uint32_t negative, const float4 par_4, const float2 par_2, T &persistent_weight, const T *global_pars, T noise_std_dw, curandState &local_state) { // par_4 order (min_bound, scale_down, max_bound, scale_up ) // global_pars see below T uw_std = global_pars[6]; T wmax = par_4.z; //[2]; T wmin = par_4.x; //[0]; T b_diff = (wmax - wmin); T &w = uw_std > 0 ? persistent_weight : apparent_weight; if (b_diff > 0) { // only do something when bounds make sense T A = negative ? global_pars[1] : global_pars[0]; // 1: up, 0: down T gamma = negative ? global_pars[3] : (-global_pars[2]); // 3: up, 2 down T a = global_pars[4]; T b = global_pars[5]; T dw = (negative > 0) ? (par_4.w) : (-par_4.y); // [3], [1] // n is larger 0 in any case if (n == 1) { UPDATE_ONCE; } else { for (int i_updates = 0; i_updates < n; i_updates++) { UPDATE_ONCE; } } // add update write noise onto apparent weight if (uw_std > 0) { T stoch_value = curand_normal(&local_state); apparent_weight = persistent_weight + uw_std * stoch_value; } } } }; #undef UPDATE_ONCE RPUCUDA_DEVICE_ADD_FUNCTOR_UPDATE_KERNELS(ExpStep, UpdateFunctorExpStep<T>, 7); template class ExpStepRPUDeviceCuda<float>; #ifdef RPU_USE_DOUBLE template class ExpStepRPUDeviceCuda<double>; #endif } // namespace RPU
0fb3e24a6215026aed165e9c7e15c385ee04b112.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! \file graph_rescaling.cpp \brief Routines regarding lambda-based graph rescaling. */ #include "graph_rescaling.cuh" #define Blocksize 512 /*we will use warps for the parallel evaluation of the expression and the serial * code for changing the interval*/ __global__ void bisectionSearchKernel(volatile matval *__restrict__ sig2, volatile matval *__restrict__ p_sp, const matidx *const ir, const int n, const matval lambda, const matval tolerance, const bool dropLeafEdge) { __shared__ matval sdata[Blocksize / 32]; const unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int warp_id = thread_id / 32; const unsigned int warp_id_block = threadIdx.x / 32; register const unsigned int lane = thread_id % 32; register unsigned int row = warp_id; const unsigned int n_warps = gridDim.x * blockDim.x / 32; register matval sigma; register matval a; register matval c; register matval sum; register matval perplexity_diff; for (; row < n; row = row + n_warps) { a = -1e3; c = 1e7; sum = 0; sigma = 1; perplexity_diff = 1 - lambda; int found = 0; int iter = 0; unsigned int row_start = ir[row]; unsigned int row_end = ir[row + 1]; while (__all_sync(FULL_WARP_MASK, found != 1) && iter < 100) { sum = 0; for (unsigned int element = row_start + lane; element < row_end; element += 32) { sum += expf(-p_sp[element] * sigma); } sum = warp_reduce(sum); if (lane == 0) { perplexity_diff = sum - lambda; if (perplexity_diff < tolerance && perplexity_diff > -tolerance) { found = 1; } if (perplexity_diff > 0) { a = sigma; if (c > 1e7) { sigma = 2 * a; } else { sigma = 0.5 * (a + c); } } else { c = sigma; sigma = 0.5 * (a + c); } sdata[warp_id_block] = sigma; } __syncwarp(FULL_WARP_MASK); sigma = sdata[warp_id_block]; iter++; } if (lane == 0) { sig2[row] = sigma; } sum = 0; for (unsigned int element = row_start + lane; element < row_end; element += 32) { p_sp[element] = expf(-p_sp[element] * sigma); sum += p_sp[element]; } sum = warp_reduce(sum); if (lane == 0) { sdata[warp_id_block] = sum; } __syncwarp(FULL_WARP_MASK); sum = sdata[warp_id_block]; for (unsigned int element = row_start + lane; element < row_end; element += 32) { p_sp[element] /= sum; } // override lambda value of leaf node? 
if (dropLeafEdge && (row_end - row_start == 1)) p_sp[row_start] = 0; } } void lambdaRescalingGPU(sparse_matrix<matval> P, matval lambda, bool dist, bool dropLeafEdge) { matval tolBinary = 1e-5; // int maxIter = 100; thrust::device_vector<matval> sig2(P.n); if (dist) std::cout << "Input considered as distances" << std::endl; hipLaunchKernelGGL(( bisectionSearchKernel), dim3(64), dim3(Blocksize), 0, 0, thrust::raw_pointer_cast(sig2.data()), P.val, P.row, P.n, lambda, tolBinary, dropLeafEdge); } #define N_THREADS 1024 __global__ void makeStochasticKernel(matval *val, matidx *row, uint32_t n, uint32_t *stoch) { const uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t warp_id = thread_id / 32; const uint32_t lane = thread_id % 32; __shared__ coord sdata[N_THREADS / 32]; const unsigned int warp_id_block = threadIdx.x / 32; const unsigned int n_warps = gridDim.x * blockDim.x / 32; for (uint32_t j = warp_id; j < n; j = j + n_warps) { matval sum = 0; for (uint32_t t = row[j] + lane; t < row[j + 1]; t += 32) { sum += val[t]; } sum = warp_reduce(sum); if (lane == 0) { sdata[warp_id_block] = sum; } __syncwarp(FULL_WARP_MASK); sum = sdata[warp_id_block]; if (fabs(sum - 1) > 1e-5) { for (uint32_t t = row[j] + lane; t < row[j + 1]; t += 32) { val[t] /= sum; } if (lane == 0) { stoch[j] = 0; } } else { if (lane == 0) { stoch[j] = 1; } } } } uint32_t makeStochasticGPU(coord *val, int *row, int n) { uint32_t *stoch; gpuErrchk(hipMallocManaged(&stoch, n * sizeof(uint32_t))); hipLaunchKernelGGL(( makeStochasticKernel), dim3(64), dim3(N_THREADS), 0, 0, val, row, n, stoch); hipDeviceSynchronize(); uint32_t nStoch = thrust::reduce(stoch, stoch + n); gpuErrchk(hipFree(stoch)); return nStoch; } uint32_t makeStochasticGPU(sparse_matrix<matval> *P) { uint32_t *stoch; gpuErrchk(hipMallocManaged(&stoch, P->n * sizeof(uint32_t))); hipLaunchKernelGGL(( makeStochasticKernel), dim3(64), dim3(512), 0, 0, P->val, P->row, P->n, stoch); hipDeviceSynchronize(); uint32_t nStoch = thrust::reduce(stoch, stoch + P->n); gpuErrchk(hipFree(stoch)); return nStoch; } /* (P+P^T)/2*/ sparse_matrix<matval>* symmetrizeMatrixGPU(sparse_matrix<matval> *A, hipsparseHandle_t &handle) { // Sort the matrix properly size_t permutation_buffer_byte_size = 0; void *permutation_buffer = NULL; int32_t *permutation = NULL; // Initialize the matrix descriptor hipsparseMatDescr_t matrix_descriptor; hipsparseCreateMatDescr(&matrix_descriptor); hipsparseSetMatType(matrix_descriptor, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(matrix_descriptor, HIPSPARSE_INDEX_BASE_ZERO); // step 1: Allocate memory buffer hipsparseXcsrsort_bufferSizeExt(handle, A->m, A->n, A->nnz, A->row, A->col, &permutation_buffer_byte_size); hipDeviceSynchronize(); hipMalloc(&permutation_buffer, sizeof(char) * permutation_buffer_byte_size); // step 2: Setup permutation vector permutation to be the identity hipMalloc(reinterpret_cast<void **>(&permutation), sizeof(int32_t) * A->nnz); hipsparseCreateIdentityPermutation(handle, A->nnz, permutation); hipDeviceSynchronize(); // step 3: Sort CSR format hipsparseXcsrsort(handle, A->m, A->n, A->nnz, matrix_descriptor, A->row, A->col, permutation, permutation_buffer); hipDeviceSynchronize(); // step 4: Gather sorted csr_values float *csr_values_a_sorted = nullptr; hipMalloc(reinterpret_cast<void **>(&csr_values_a_sorted), (A->nnz) * sizeof(float)); hipsparseSgthr(handle, A->nnz, A->val, csr_values_a_sorted, permutation, HIPSPARSE_INDEX_BASE_ZERO); hipDeviceSynchronize(); // Free some memory 
hipFree(permutation_buffer); hipFree(permutation); A->val = csr_values_a_sorted; coord *d_csrValB; gpuErrchk(hipMalloc(&d_csrValB, A->nnz * sizeof(coord))); int *d_csrRowPtrB; gpuErrchk(hipMalloc(&d_csrRowPtrB, (A->m + 1) * sizeof(int))); int *d_csrColIndB; gpuErrchk(hipMalloc(&d_csrColIndB, A->nnz * sizeof(int))); hipsparseScsr2csc(handle, A->m, A->n, A->nnz, A->val, A->row, A->col, d_csrValB, d_csrColIndB, d_csrRowPtrB, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO); hipDeviceSynchronize(); // --- Summing the two matrices int baseC, nnz3; coord *sym_val; int *sym_col, *sym_row; // --- nnzTotalDevHostPtr points to host memory int *nnzTotalDevHostPtr = &nnz3; hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST); gpuErrchk(hipMalloc(&sym_row, (A->m + 1) * sizeof(int))); hipsparseXcsrgeamNnz(handle, A->m, A->n, matrix_descriptor, A->nnz, A->row, A->col, matrix_descriptor, A->nnz, d_csrRowPtrB, d_csrColIndB, matrix_descriptor, sym_row, nnzTotalDevHostPtr); if (NULL != nnzTotalDevHostPtr) { nnz3 = *nnzTotalDevHostPtr; } else { gpuErrchk( hipMemcpy(&nnz3, sym_row + A->m, sizeof(int), hipMemcpyDeviceToHost)); gpuErrchk(hipMemcpy(&baseC, sym_row, sizeof(int), hipMemcpyDeviceToHost)); nnz3 -= baseC; } gpuErrchk(hipMalloc(&sym_col, nnz3 * sizeof(int))); gpuErrchk(hipMalloc(&sym_val, nnz3 * sizeof(coord))); coord alpha = 0.5, beta = 0.5; hipsparseScsrgeam(handle, A->m, A->n, &alpha, matrix_descriptor, A->nnz, A->val, A->row, A->col, &beta, matrix_descriptor, A->nnz, d_csrValB, d_csrRowPtrB, d_csrColIndB, matrix_descriptor, sym_val, sym_row, sym_col); hipDeviceSynchronize(); gpuErrchk(hipFree(d_csrValB)); gpuErrchk(hipFree(d_csrRowPtrB)); gpuErrchk(hipFree(d_csrColIndB)); sparse_matrix<coord>* C=(sparse_matrix<coord> *)malloc(sizeof(sparse_matrix<coord>)); C->n=A->n; C->m=A->m; C->nnz = nnz3; C->row = sym_row; C->col = sym_col; C->val = sym_val; return C; } int SymmetrizeMatrix(hipsparseHandle_t &handle, float** d_symmetrized_values, int** d_symmetrized_rowptr, int** d_symmetrized_colind, float *csr_values_a, int *csr_column_ptr_a, const int num_points, int* csr_row_ptr_a, const int nnz ) { // Initialize the matrix descriptor hipsparseMatDescr_t matrix_descriptor; hipsparseCreateMatDescr(&matrix_descriptor); hipsparseSetMatType(matrix_descriptor, HIPSPARSE_MATRIX_TYPE_GENERAL); hipsparseSetMatIndexBase(matrix_descriptor, HIPSPARSE_INDEX_BASE_ZERO); // Sort the matrix properly size_t permutation_buffer_byte_size = 0; void *permutation_buffer = NULL; int32_t *permutation = NULL; // step 1: Allocate memory buffer hipsparseXcsrsort_bufferSizeExt(handle, num_points, num_points, nnz, csr_row_ptr_a, csr_column_ptr_a, &permutation_buffer_byte_size); hipDeviceSynchronize(); hipMalloc(&permutation_buffer, sizeof(char)*permutation_buffer_byte_size); // step 2: Setup permutation vector permutation to be the identity hipMalloc(reinterpret_cast<void**>(&permutation), sizeof(int32_t)*nnz); hipsparseCreateIdentityPermutation(handle, nnz, permutation); hipDeviceSynchronize(); // step 3: Sort CSR format hipsparseXcsrsort(handle, num_points, num_points, nnz, matrix_descriptor, csr_row_ptr_a, csr_column_ptr_a, permutation, permutation_buffer); hipDeviceSynchronize(); // step 4: Gather sorted csr_values float* csr_values_a_sorted = nullptr; hipMalloc(reinterpret_cast<void**>(&csr_values_a_sorted), (nnz)*sizeof(float)); hipsparseSgthr(handle,nnz, csr_values_a, csr_values_a_sorted, permutation, HIPSPARSE_INDEX_BASE_ZERO); hipDeviceSynchronize(); // Free some memory hipFree(permutation_buffer); 
hipFree(permutation); csr_values_a = csr_values_a_sorted; // We need A^T, so we do a csr2csc() call int32_t* csc_row_ptr_at = nullptr; hipMalloc(reinterpret_cast<void**>(&csc_row_ptr_at), (nnz)*sizeof(int32_t)); int32_t* csc_column_ptr_at = nullptr; hipMalloc(reinterpret_cast<void**>(&csc_column_ptr_at), (num_points+1)*sizeof(int32_t)); float* csc_values_at = nullptr; hipMalloc(reinterpret_cast<void**>(&csc_values_at), (nnz)*sizeof(float)); // Do the transpose operation hipsparseScsr2csc(handle, num_points, num_points, nnz, csr_values_a, csr_row_ptr_a, csr_column_ptr_a, csc_values_at, csc_row_ptr_at, csc_column_ptr_at, HIPSPARSE_ACTION_NUMERIC, HIPSPARSE_INDEX_BASE_ZERO); hipDeviceSynchronize(); // Now compute the output size of the matrix int32_t base_C, num_nonzeros_C; int32_t symmetrized_num_nonzeros = -1; hipsparseSetPointerMode(handle, HIPSPARSE_POINTER_MODE_HOST); //d_symmetrized_rowptr.resize(num_points+1); gpuErrchk(hipMallocManaged(&(*d_symmetrized_rowptr), (num_points+1) * sizeof(int))); hipsparseXcsrgeamNnz(handle, num_points, num_points, matrix_descriptor, nnz, csr_row_ptr_a, csr_column_ptr_a, matrix_descriptor, nnz, csc_column_ptr_at, csc_row_ptr_at, matrix_descriptor, (*d_symmetrized_rowptr), &symmetrized_num_nonzeros); hipDeviceSynchronize(); // Do some useful checking... if (-1 != symmetrized_num_nonzeros) { num_nonzeros_C = symmetrized_num_nonzeros; } else { hipMemcpy(&num_nonzeros_C, (*d_symmetrized_rowptr) + num_points, sizeof(int32_t), hipMemcpyDeviceToHost); hipMemcpy(&base_C, (*d_symmetrized_rowptr), sizeof(int), hipMemcpyDeviceToHost); } // Allocate memory for the new summed array gpuErrchk(hipMallocManaged(&(*d_symmetrized_colind), num_nonzeros_C * sizeof(int))); gpuErrchk(hipMallocManaged(&(*d_symmetrized_values), num_nonzeros_C * sizeof(float))); // Sum the arrays //float kAlpha = 1.0f ; //float kBeta = 1.0f ; float kAlpha = 1.0f / (2.0f * num_points); float kBeta = 1.0f / (2.0f * num_points); hipsparseScsrgeam(handle, num_points, num_points, &kAlpha, matrix_descriptor, nnz, csr_values_a, csr_row_ptr_a, csr_column_ptr_a, &kBeta, matrix_descriptor, nnz, csc_values_at, csc_column_ptr_at, csc_row_ptr_at, matrix_descriptor, (*d_symmetrized_values), (*d_symmetrized_rowptr), (*d_symmetrized_colind)); hipDeviceSynchronize(); // Free the memory we were using... hipFree(csr_values_a); hipFree(csc_values_at); hipFree(csr_row_ptr_a); hipFree(csc_column_ptr_at); hipFree(csc_row_ptr_at); return num_nonzeros_C; }
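makeStochasticKernel in the block above rescales every CSR row of P to unit sum and reports how many rows were already stochastic. The host-side reference below mirrors that logic on a made-up 2x2 matrix; the helper name, the reuse of the 1e-5 tolerance and the example values are illustrative assumptions, not part of the original file.

#include <cstdio>
#include <cmath>
#include <vector>

// Host reference of the row normalisation done by makeStochasticKernel:
// each CSR row is rescaled to sum to 1; the return value counts rows that
// were already stochastic within the same 1e-5 tolerance the kernel uses.
static int makeStochasticHost(std::vector<float>& val, const std::vector<int>& row, int n)
{
    int nStoch = 0;
    for (int j = 0; j < n; ++j) {
        float sum = 0.f;
        for (int t = row[j]; t < row[j + 1]; ++t) sum += val[t];
        if (std::fabs(sum - 1.f) > 1e-5f) {
            for (int t = row[j]; t < row[j + 1]; ++t) val[t] /= sum;
        } else {
            ++nStoch;
        }
    }
    return nStoch;
}

int main()
{
    // Toy CSR matrix: row 0 = {0.5, 0.5} (already stochastic), row 1 = {2.0}.
    std::vector<int>   row = {0, 2, 3};
    std::vector<float> val = {0.5f, 0.5f, 2.0f};
    int nStoch = makeStochasticHost(val, row, 2);
    std::printf("already-stochastic rows: %d, normalised row-1 entry: %g\n", nStoch, val[2]);
    return 0;
}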
0fb3e24a6215026aed165e9c7e15c385ee04b112.cu
/*! \file graph_rescaling.cpp \brief Routines regarding lambda-based graph rescaling. */ #include "graph_rescaling.cuh" #define Blocksize 512 /*we will use warps for the parallel evaluation of the expression and the serial * code for changing the interval*/ __global__ void bisectionSearchKernel(volatile matval *__restrict__ sig2, volatile matval *__restrict__ p_sp, const matidx *const ir, const int n, const matval lambda, const matval tolerance, const bool dropLeafEdge) { __shared__ matval sdata[Blocksize / 32]; const unsigned int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int warp_id = thread_id / 32; const unsigned int warp_id_block = threadIdx.x / 32; register const unsigned int lane = thread_id % 32; register unsigned int row = warp_id; const unsigned int n_warps = gridDim.x * blockDim.x / 32; register matval sigma; register matval a; register matval c; register matval sum; register matval perplexity_diff; for (; row < n; row = row + n_warps) { a = -1e3; c = 1e7; sum = 0; sigma = 1; perplexity_diff = 1 - lambda; int found = 0; int iter = 0; unsigned int row_start = ir[row]; unsigned int row_end = ir[row + 1]; while (__all_sync(FULL_WARP_MASK, found != 1) && iter < 100) { sum = 0; for (unsigned int element = row_start + lane; element < row_end; element += 32) { sum += expf(-p_sp[element] * sigma); } sum = warp_reduce(sum); if (lane == 0) { perplexity_diff = sum - lambda; if (perplexity_diff < tolerance && perplexity_diff > -tolerance) { found = 1; } if (perplexity_diff > 0) { a = sigma; if (c > 1e7) { sigma = 2 * a; } else { sigma = 0.5 * (a + c); } } else { c = sigma; sigma = 0.5 * (a + c); } sdata[warp_id_block] = sigma; } __syncwarp(FULL_WARP_MASK); sigma = sdata[warp_id_block]; iter++; } if (lane == 0) { sig2[row] = sigma; } sum = 0; for (unsigned int element = row_start + lane; element < row_end; element += 32) { p_sp[element] = expf(-p_sp[element] * sigma); sum += p_sp[element]; } sum = warp_reduce(sum); if (lane == 0) { sdata[warp_id_block] = sum; } __syncwarp(FULL_WARP_MASK); sum = sdata[warp_id_block]; for (unsigned int element = row_start + lane; element < row_end; element += 32) { p_sp[element] /= sum; } // override lambda value of leaf node? 
if (dropLeafEdge && (row_end - row_start == 1)) p_sp[row_start] = 0; } } void lambdaRescalingGPU(sparse_matrix<matval> P, matval lambda, bool dist, bool dropLeafEdge) { matval tolBinary = 1e-5; // int maxIter = 100; thrust::device_vector<matval> sig2(P.n); if (dist) std::cout << "Input considered as distances" << std::endl; bisectionSearchKernel<<<64, Blocksize>>>( thrust::raw_pointer_cast(sig2.data()), P.val, P.row, P.n, lambda, tolBinary, dropLeafEdge); } #define N_THREADS 1024 __global__ void makeStochasticKernel(matval *val, matidx *row, uint32_t n, uint32_t *stoch) { const uint32_t thread_id = blockIdx.x * blockDim.x + threadIdx.x; const uint32_t warp_id = thread_id / 32; const uint32_t lane = thread_id % 32; __shared__ coord sdata[N_THREADS / 32]; const unsigned int warp_id_block = threadIdx.x / 32; const unsigned int n_warps = gridDim.x * blockDim.x / 32; for (uint32_t j = warp_id; j < n; j = j + n_warps) { matval sum = 0; for (uint32_t t = row[j] + lane; t < row[j + 1]; t += 32) { sum += val[t]; } sum = warp_reduce(sum); if (lane == 0) { sdata[warp_id_block] = sum; } __syncwarp(FULL_WARP_MASK); sum = sdata[warp_id_block]; if (fabs(sum - 1) > 1e-5) { for (uint32_t t = row[j] + lane; t < row[j + 1]; t += 32) { val[t] /= sum; } if (lane == 0) { stoch[j] = 0; } } else { if (lane == 0) { stoch[j] = 1; } } } } uint32_t makeStochasticGPU(coord *val, int *row, int n) { uint32_t *stoch; gpuErrchk(cudaMallocManaged(&stoch, n * sizeof(uint32_t))); makeStochasticKernel<<<64, N_THREADS>>>(val, row, n, stoch); cudaDeviceSynchronize(); uint32_t nStoch = thrust::reduce(stoch, stoch + n); gpuErrchk(cudaFree(stoch)); return nStoch; } uint32_t makeStochasticGPU(sparse_matrix<matval> *P) { uint32_t *stoch; gpuErrchk(cudaMallocManaged(&stoch, P->n * sizeof(uint32_t))); makeStochasticKernel<<<64, 512>>>(P->val, P->row, P->n, stoch); cudaDeviceSynchronize(); uint32_t nStoch = thrust::reduce(stoch, stoch + P->n); gpuErrchk(cudaFree(stoch)); return nStoch; } /* (P+P^T)/2*/ sparse_matrix<matval>* symmetrizeMatrixGPU(sparse_matrix<matval> *A, cusparseHandle_t &handle) { // Sort the matrix properly size_t permutation_buffer_byte_size = 0; void *permutation_buffer = NULL; int32_t *permutation = NULL; // Initialize the matrix descriptor cusparseMatDescr_t matrix_descriptor; cusparseCreateMatDescr(&matrix_descriptor); cusparseSetMatType(matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(matrix_descriptor, CUSPARSE_INDEX_BASE_ZERO); // step 1: Allocate memory buffer cusparseXcsrsort_bufferSizeExt(handle, A->m, A->n, A->nnz, A->row, A->col, &permutation_buffer_byte_size); cudaDeviceSynchronize(); cudaMalloc(&permutation_buffer, sizeof(char) * permutation_buffer_byte_size); // step 2: Setup permutation vector permutation to be the identity cudaMalloc(reinterpret_cast<void **>(&permutation), sizeof(int32_t) * A->nnz); cusparseCreateIdentityPermutation(handle, A->nnz, permutation); cudaDeviceSynchronize(); // step 3: Sort CSR format cusparseXcsrsort(handle, A->m, A->n, A->nnz, matrix_descriptor, A->row, A->col, permutation, permutation_buffer); cudaDeviceSynchronize(); // step 4: Gather sorted csr_values float *csr_values_a_sorted = nullptr; cudaMalloc(reinterpret_cast<void **>(&csr_values_a_sorted), (A->nnz) * sizeof(float)); cusparseSgthr(handle, A->nnz, A->val, csr_values_a_sorted, permutation, CUSPARSE_INDEX_BASE_ZERO); cudaDeviceSynchronize(); // Free some memory cudaFree(permutation_buffer); cudaFree(permutation); A->val = csr_values_a_sorted; coord *d_csrValB; 
gpuErrchk(cudaMalloc(&d_csrValB, A->nnz * sizeof(coord))); int *d_csrRowPtrB; gpuErrchk(cudaMalloc(&d_csrRowPtrB, (A->m + 1) * sizeof(int))); int *d_csrColIndB; gpuErrchk(cudaMalloc(&d_csrColIndB, A->nnz * sizeof(int))); cusparseScsr2csc(handle, A->m, A->n, A->nnz, A->val, A->row, A->col, d_csrValB, d_csrColIndB, d_csrRowPtrB, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO); cudaDeviceSynchronize(); // --- Summing the two matrices int baseC, nnz3; coord *sym_val; int *sym_col, *sym_row; // --- nnzTotalDevHostPtr points to host memory int *nnzTotalDevHostPtr = &nnz3; cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST); gpuErrchk(cudaMalloc(&sym_row, (A->m + 1) * sizeof(int))); cusparseXcsrgeamNnz(handle, A->m, A->n, matrix_descriptor, A->nnz, A->row, A->col, matrix_descriptor, A->nnz, d_csrRowPtrB, d_csrColIndB, matrix_descriptor, sym_row, nnzTotalDevHostPtr); if (NULL != nnzTotalDevHostPtr) { nnz3 = *nnzTotalDevHostPtr; } else { gpuErrchk( cudaMemcpy(&nnz3, sym_row + A->m, sizeof(int), cudaMemcpyDeviceToHost)); gpuErrchk(cudaMemcpy(&baseC, sym_row, sizeof(int), cudaMemcpyDeviceToHost)); nnz3 -= baseC; } gpuErrchk(cudaMalloc(&sym_col, nnz3 * sizeof(int))); gpuErrchk(cudaMalloc(&sym_val, nnz3 * sizeof(coord))); coord alpha = 0.5, beta = 0.5; cusparseScsrgeam(handle, A->m, A->n, &alpha, matrix_descriptor, A->nnz, A->val, A->row, A->col, &beta, matrix_descriptor, A->nnz, d_csrValB, d_csrRowPtrB, d_csrColIndB, matrix_descriptor, sym_val, sym_row, sym_col); cudaDeviceSynchronize(); gpuErrchk(cudaFree(d_csrValB)); gpuErrchk(cudaFree(d_csrRowPtrB)); gpuErrchk(cudaFree(d_csrColIndB)); sparse_matrix<coord>* C=(sparse_matrix<coord> *)malloc(sizeof(sparse_matrix<coord>)); C->n=A->n; C->m=A->m; C->nnz = nnz3; C->row = sym_row; C->col = sym_col; C->val = sym_val; return C; } int SymmetrizeMatrix(cusparseHandle_t &handle, float** d_symmetrized_values, int** d_symmetrized_rowptr, int** d_symmetrized_colind, float *csr_values_a, int *csr_column_ptr_a, const int num_points, int* csr_row_ptr_a, const int nnz ) { // Initialize the matrix descriptor cusparseMatDescr_t matrix_descriptor; cusparseCreateMatDescr(&matrix_descriptor); cusparseSetMatType(matrix_descriptor, CUSPARSE_MATRIX_TYPE_GENERAL); cusparseSetMatIndexBase(matrix_descriptor, CUSPARSE_INDEX_BASE_ZERO); // Sort the matrix properly size_t permutation_buffer_byte_size = 0; void *permutation_buffer = NULL; int32_t *permutation = NULL; // step 1: Allocate memory buffer cusparseXcsrsort_bufferSizeExt(handle, num_points, num_points, nnz, csr_row_ptr_a, csr_column_ptr_a, &permutation_buffer_byte_size); cudaDeviceSynchronize(); cudaMalloc(&permutation_buffer, sizeof(char)*permutation_buffer_byte_size); // step 2: Setup permutation vector permutation to be the identity cudaMalloc(reinterpret_cast<void**>(&permutation), sizeof(int32_t)*nnz); cusparseCreateIdentityPermutation(handle, nnz, permutation); cudaDeviceSynchronize(); // step 3: Sort CSR format cusparseXcsrsort(handle, num_points, num_points, nnz, matrix_descriptor, csr_row_ptr_a, csr_column_ptr_a, permutation, permutation_buffer); cudaDeviceSynchronize(); // step 4: Gather sorted csr_values float* csr_values_a_sorted = nullptr; cudaMalloc(reinterpret_cast<void**>(&csr_values_a_sorted), (nnz)*sizeof(float)); cusparseSgthr(handle,nnz, csr_values_a, csr_values_a_sorted, permutation, CUSPARSE_INDEX_BASE_ZERO); cudaDeviceSynchronize(); // Free some memory cudaFree(permutation_buffer); cudaFree(permutation); csr_values_a = csr_values_a_sorted; // We need A^T, so we do a csr2csc() call int32_t* 
csc_row_ptr_at = nullptr; cudaMalloc(reinterpret_cast<void**>(&csc_row_ptr_at), (nnz)*sizeof(int32_t)); int32_t* csc_column_ptr_at = nullptr; cudaMalloc(reinterpret_cast<void**>(&csc_column_ptr_at), (num_points+1)*sizeof(int32_t)); float* csc_values_at = nullptr; cudaMalloc(reinterpret_cast<void**>(&csc_values_at), (nnz)*sizeof(float)); // Do the transpose operation cusparseScsr2csc(handle, num_points, num_points, nnz, csr_values_a, csr_row_ptr_a, csr_column_ptr_a, csc_values_at, csc_row_ptr_at, csc_column_ptr_at, CUSPARSE_ACTION_NUMERIC, CUSPARSE_INDEX_BASE_ZERO); cudaDeviceSynchronize(); // Now compute the output size of the matrix int32_t base_C, num_nonzeros_C; int32_t symmetrized_num_nonzeros = -1; cusparseSetPointerMode(handle, CUSPARSE_POINTER_MODE_HOST); //d_symmetrized_rowptr.resize(num_points+1); gpuErrchk(cudaMallocManaged(&(*d_symmetrized_rowptr), (num_points+1) * sizeof(int))); cusparseXcsrgeamNnz(handle, num_points, num_points, matrix_descriptor, nnz, csr_row_ptr_a, csr_column_ptr_a, matrix_descriptor, nnz, csc_column_ptr_at, csc_row_ptr_at, matrix_descriptor, (*d_symmetrized_rowptr), &symmetrized_num_nonzeros); cudaDeviceSynchronize(); // Do some useful checking... if (-1 != symmetrized_num_nonzeros) { num_nonzeros_C = symmetrized_num_nonzeros; } else { cudaMemcpy(&num_nonzeros_C, (*d_symmetrized_rowptr) + num_points, sizeof(int32_t), cudaMemcpyDeviceToHost); cudaMemcpy(&base_C, (*d_symmetrized_rowptr), sizeof(int), cudaMemcpyDeviceToHost); } // Allocate memory for the new summed array gpuErrchk(cudaMallocManaged(&(*d_symmetrized_colind), num_nonzeros_C * sizeof(int))); gpuErrchk(cudaMallocManaged(&(*d_symmetrized_values), num_nonzeros_C * sizeof(float))); // Sum the arrays //float kAlpha = 1.0f ; //float kBeta = 1.0f ; float kAlpha = 1.0f / (2.0f * num_points); float kBeta = 1.0f / (2.0f * num_points); cusparseScsrgeam(handle, num_points, num_points, &kAlpha, matrix_descriptor, nnz, csr_values_a, csr_row_ptr_a, csr_column_ptr_a, &kBeta, matrix_descriptor, nnz, csc_values_at, csc_column_ptr_at, csc_row_ptr_at, matrix_descriptor, (*d_symmetrized_values), (*d_symmetrized_rowptr), (*d_symmetrized_colind)); cudaDeviceSynchronize(); // Free the memory we were using... cudaFree(csr_values_a); cudaFree(csc_values_at); cudaFree(csr_row_ptr_a); cudaFree(csc_column_ptr_at); cudaFree(csc_row_ptr_at); return num_nonzeros_C; }
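Both graph_rescaling variants build the symmetrized matrix (P + P^T)/2 by transposing with cusparseScsr2csc and then summing with cusparseScsrgeam using alpha = beta = 0.5. For a quick correctness check on a tiny input, the dense host reference below computes the same symmetrization directly; the 3x3 values are invented and the snippet does not depend on the library's sparse_matrix type.

#include <cstdio>

// Dense host reference for the (A + A^T)/2 symmetrization that
// symmetrizeMatrixGPU assembles in CSR form through cuSPARSE.
int main()
{
    const int n = 3;
    float A[n][n] = { {0.f, 1.f, 0.f},      // made-up example matrix
                      {3.f, 0.f, 2.f},
                      {0.f, 0.f, 4.f} };
    float S[n][n];
    for (int i = 0; i < n; ++i)
        for (int j = 0; j < n; ++j)
            S[i][j] = 0.5f * (A[i][j] + A[j][i]);   // alpha = beta = 0.5

    for (int i = 0; i < n; ++i)
        std::printf("%g %g %g\n", S[i][0], S[i][1], S[i][2]);
    return 0;
}

The SymmetrizeMatrix overload instead uses kAlpha = kBeta = 1/(2*num_points), i.e. it returns (A + A^T)/(2N) rather than (A + A^T)/2, which matches the usual t-SNE normalisation of the joint probabilities.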
7f6c13c9a1785262b960f555b2b4e487258764d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * @Description: GPU implementation of fp_par_2d.h * @Author: Tianling Lyu * @Date: 2019-11-13 14:42:30 * @LastEditors: Tianling Lyu * @LastEditTime: 2019-12-09 11:50:37 */ #include "include/fp_par_2d.h" #define _USE_MATH_DEFINES #include <cmath> #include "cuda/cuda_common.h" #ifndef M_PI #define M_PI 3.14159265358979323846264338327950288 #define M_PI_4 M_PI/4 #endif #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #ifndef MIN #define MIN(x, y) ((x) > (y) ? (y) : (x)) #endif namespace ct_recon { //#if USE_ROCM __global__ void ParallelProjection2DRayCastingPrepareKernel(double* sincostbl, double* begins, int* nsteps, const ParallelProjection2DParam param, const double step_size, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; // useful constants const double x_center = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double y_center = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double fov_squ = param.fov*param.fov; const double s_begin = -(static_cast<double>(param.ns-1) / 2.0 + param.offset_s) * param.ds; const bool usex = sin_angle > (1.0/1.414); const double step_x = step_size * sin_angle; const double step_y = step_size * cos_angle; // variables double s = s_begin, x1, y1, x2, y2; double half_subtense_squ, half_subtense; double* begin_ptr = begins + 2*ia*param.ns; int* nstep_ptr = nsteps + ia*param.ns; for (int is = 0; is < param.ns; ++is) { half_subtense_squ = fov_squ - s*s; if (half_subtense_squ > 0) { half_subtense = sqrt(half_subtense_squ); // intersection points x1 = (s*cos_angle - half_subtense*sin_angle) / param.dx + x_center; y1 = -(s*sin_angle + half_subtense*cos_angle) / param.dy + y_center; x2 = (s*cos_angle + half_subtense*sin_angle) / param.dx + x_center; y2 = -(s*sin_angle - half_subtense*cos_angle) / param.dy + y_center; if (usex) *nstep_ptr = static_cast<int>((x2 - x1) / step_x); else *nstep_ptr = static_cast<int>((y2 - y1) / step_y); // store results begin_ptr[0] = x1; begin_ptr[1] = y1; } else { // not intersected with FoV *nstep_ptr = 0; begin_ptr[0] = 0; begin_ptr[1] = 0; } // next channel begin_ptr += 2; ++nstep_ptr; s += param.ds; } } return; } bool ParallelProjection2DRayCastingPrepare::calculate_on_gpu(double* sincostbl, double* begins, int* nsteps, hipStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); hipLaunchKernelGGL(( ParallelProjection2DRayCastingPrepareKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, sincostbl, begins, nsteps, param_, step_size_, param_.na); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelProjection2DRayCastingKernel(const T* img, T* proj, const double* sincostbl, const double* begins, const int* nsteps, const ParallelProjection2DParam param, const double step_size, const int n_elements) { for (int thread_id : CudaGridRangeX(n_elements)) { int ia = thread_id / param.ns; int is = thread_id % param.ns; double sin_angle = sincostbl[2*ia]; double cos_angle = sincostbl[2*ia + 1]; double step_x = step_size * sin_angle; double step_y = step_size * cos_angle; int pos = is + ia * param.ns; double x = begins[2*pos]; double y = begins[2*pos + 1]; double sum = 0; int ix1, 
ix2, iy1, iy2; double u, v; for (int ray_index = 0; ray_index < nsteps[pos]; ++ray_index) { if (x >= 0 && x <= param.nx-1 && y >= 0 && y <= param.ny-1){ // 2-D linear interpolation ix1 = floor(x); ix2 = ceil(x); // use ceil instead of ix1+1 to suit to x==nx-1 u = x - ix1; iy1 = floor(y); iy2 = ceil(y); v = y - iy1; sum += (1-v) * ((1-u)*img[ix1+iy1*param.nx] + u*img[ix2+iy1*param.nx]) + v * ((1-u)*img[ix1+iy2*param.nx] + u*img[ix2+iy2*param.nx]); } x += step_x; y += step_y; } proj[pos] = sum * step_size * param.dx; } return; } template <> bool ParallelProjection2DRayCasting<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* begins, const int* nsteps, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DRayCastingKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, begins, nsteps, param_, step_size_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> bool ParallelProjection2DRayCasting<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* begins, const int* nsteps, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DRayCastingKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, begins, nsteps, param_, step_size_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } __global__ void ParallelProjection2DRayDrivenPrepareKernel(double* sincostbl, double* beginoffset, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id, is; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; while (angle < 0) angle += 2 * M_PI; while (angle >= 2 * M_PI) angle -= 2 * M_PI; bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex ? 
1 : 0; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // variables double begin, offset, offset2; double* begin_ptr = beginoffset + 2*ia*param.ns; if (b_usex) { offset = param.dy * sin_angle / (cos_angle * param.dx); offset2 = param.ds / (cos_angle * param.dx); begin = centx - centy*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; // next channel begin += offset2; begin_ptr += 2; } } else { offset = param.dx * cos_angle / (sin_angle * param.dy); offset2 = -param.ds / (sin_angle * param.dy); begin = centy - centx*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; // next channel begin += offset2; begin_ptr += 2; } } } return; } bool ParallelProjection2DRayDrivenPrepare::calculate_on_gpu(double* sincostbl, double* beginoffset, int* usex, hipStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); hipLaunchKernelGGL(( ParallelProjection2DRayDrivenPrepareKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, sincostbl, beginoffset, usex, param_, param_.na); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelProjection2DRayDrivenKernel(const T* img, T* proj, const double* sincostbl, const double* beginoffset, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id / param.ns; int is = thread_id % param.ns; bool b_usex = usex[ia]; int pos = is + ia * param.ns; double begin = beginoffset[2*pos]; double offset = beginoffset[2*pos+1]; T sum = 0; if (b_usex) { double x = begin, u; int ix1, ix2, iy; for (iy = 0; iy < param.ny; ++iy) { if (x >= 0 && x <= param.nx-1) { // linear interpolation ix1 = static_cast<int>(floor(x)); ix2 = static_cast<int>(ceil(x)); u = x - ix1; sum += (1-u) * img[ix1 + iy*param.nx] + u * img[ix2 + iy*param.nx]; } // next row x += offset; } proj[pos] = sum * fabs(param.dy / sincostbl[2*ia + 1]); } else { double y = begin, u; int ix, iy1, iy2; for (ix = 0; ix < param.nx; ++ix) { if (y >= 0 && y <= param.ny) { //linear interpolation iy1 = static_cast<int>(floor(y)); iy2 = static_cast<int>(ceil(y)); u = y - iy1; sum += (1-u) * img[ix + iy1*param.nx] + u * img[ix + iy2*param.nx]; } // next column y += offset; } proj[pos] = sum * fabs(param.dx / sincostbl[2*ia]); } } return; } template <> bool ParallelProjection2DRayDriven<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* beginoffset, const int* usex, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DRayDrivenKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, beginoffset, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> bool ParallelProjection2DRayDriven<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* beginoffset, const int* usex, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( 
ParallelProjection2DRayDrivenKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, beginoffset, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } __global__ void ParallelProjection2DRayDrivenGradPrepKernel(double* weights, double* pos, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int ia = thread_id; double cos_angle, sin_angle; // we use nx for both nx and ny here, so please make sure nx==ny double* pos_ptr = pos + 2*ia*param.nx; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // begin calculation double angle = param.orbit_start + ia * param.orbit; sin_angle = sin(angle); cos_angle = cos(angle); bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex; weights[ia] = fabs(b_usex ? param.dy / cos_angle : param.dx / sin_angle); double temp1 = param.dx * cos_angle / param.ds; double temp2 = -param.dy * sin_angle / param.ds; double offset1 = b_usex ? temp1 : temp2; double offset2 = b_usex ? temp2 : temp1; double begin = cents - centx * temp1 - centy * temp2; // again, we use nx here for both nx and ny to reduce code amount for (unsigned int i = 0; i < param.nx; ++i) { pos_ptr[0] = begin; pos_ptr[1] = offset1; begin += offset2; pos_ptr += 2; } } return; } bool ParallelProjection2DRayDrivenGradPrep::calculate_on_gpu(double* weights, double* pos, int* usex, hipStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); hipLaunchKernelGGL(( ParallelProjection2DRayDrivenGradPrepKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, weights, pos, usex, param_, param_.na); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelProjection2DRayDrivenGradKernel(const T* proj, T* img, const double* weights, const double* pos, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int iy = thread_id / param.nx; int ix = thread_id % param.nx; unsigned int ia, is; bool b_usex; double begin, offset, mid, left, right, temp; int i_hori, i_vert; const double* weight_ptr = weights; const double* pos_ptr = pos; const int* usex_ptr = usex; const T* proj_ptr = proj; // begin calculation double sum = 0.0, tempsum; for (ia = 0; ia < param.na; ++ia) { b_usex = *usex_ptr; i_hori = b_usex ? ix : iy; i_vert = b_usex ? iy : ix; // calculate corresponding ray range begin = pos_ptr[i_vert<<1]; offset = pos_ptr[(i_vert<<1) | 1]; mid = begin + offset * i_hori; left = i_hori==0 ? mid : mid-offset; right = i_hori==param.nx-1 ? 
mid : mid+offset; // make sure left <= right if (left > right) { temp = left; left = right; right = temp; } // accumulate values within the range tempsum = 0.0; for (is = ceil(left); is <= right; ++is) { tempsum += (1-abs((mid-static_cast<double>(is)) / offset)) * proj_ptr[is]; } sum += tempsum * (*weight_ptr); // next angle ++usex_ptr; ++weight_ptr; proj_ptr += param.ns; pos_ptr += 2*param.nx; } // write to image img[ix + iy*param.nx] = sum; } return; } template <> bool ParallelProjection2DRayDrivenGrad<float>::calculate_on_gpu(const float* proj, float* img, const double* weights, const double* pos, const int* usex, hipStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DRayDrivenGradKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, weights, pos, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> bool ParallelProjection2DRayDrivenGrad<double>::calculate_on_gpu(const double* proj, double* img, const double* weights, const double* pos, const int* usex, hipStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DRayDrivenGradKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, weights, pos, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } __global__ void ParallelProjection2DDisDrivenPrepKernel(double* sincostbl, double* beginoffset, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id, is; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; while (angle < 0) angle += 2 * M_PI; while (angle >= 2 * M_PI) angle -= 2 * M_PI; bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex ? 
1 : 0; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // variables double begin, offset, offset2; double* begin_ptr = beginoffset + 3*ia*param.ns; if (b_usex) { offset = param.dy * sin_angle / (cos_angle * param.dx); offset2 = param.ds / (cos_angle * param.dx); begin = centx - centy*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; begin_ptr[2] = offset2 / 2; // next channel begin += offset2; begin_ptr += 3; } } else { offset = param.dx * cos_angle / (sin_angle * param.dy); offset2 = -param.ds / (sin_angle * param.dy); begin = centy - centx*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; begin_ptr[2] = offset2 / 2; // next channel begin += offset2; begin_ptr += 3; } } } return; } bool ParallelProjection2DDisDrivenPrep::calculate_on_gpu(double* sincostbl, double* beginoffset, int* usex, hipStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenPrepKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, sincostbl, beginoffset, usex, param_, param_.na); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelProjection2DDisDrivenKernel(const T* img, T* proj, const double* sincostbl, const double* beginoffset, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id / param.ns; int ix, iy; bool b_usex = usex[ia]; double begin = beginoffset[3*thread_id]; double offset = beginoffset[3*thread_id+1]; double offset2 = beginoffset[3*thread_id+2] / 2; double left = begin - offset2; double right = begin + offset2; if (left > right) { double temp = left; left = right; right = temp; } double length; T sum = 0, tsum, lsum; if (b_usex) { for (iy = 0; iy < param.ny; ++iy) { tsum = 0; lsum = 0; for (ix = floor(left); ix < ceil(right); ++ix) { if (ix < 0 || ix >= param.nx-1) continue; length = MIN(ix+1, right) - MAX(ix, left); tsum += length * (img[ix+iy*param.nx] + img[ix+1+iy*param.nx]) / 2; lsum += length; } if (lsum > 0) sum += tsum / lsum; // next row left += offset; right += offset; } proj[thread_id] = sum * fabs(param.dy / sincostbl[2*ia + 1]); } else { for (ix = 0; ix < param.nx; ++ix) { tsum = 0; lsum = 0; for (iy = floor(left); iy < ceil(right); ++iy) { if (iy < 0 || iy >= param.ny-1) continue; length = MIN(iy+1, right) - MAX(iy, left); tsum += length * (img[ix+iy*param.nx] + img[ix+(iy+1)*param.nx]) / 2; lsum += length; } if (lsum > 0) sum += tsum / lsum; // next row left += offset; right += offset; } proj[thread_id] = sum * fabs(param.dx / sincostbl[2*ia]); } } return; } template <> bool ParallelProjection2DDisDriven<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* beginoffset, const int* usex, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, beginoffset, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> 
bool ParallelProjection2DDisDriven<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* beginoffset, const int* usex, hipStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, img, proj, sincostbl, beginoffset, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } __global__ void ParallelProjection2DDisDrivenGradPrepKernel(double* weights, double* pos, int* usex, const ParallelProjection2DParam param, const int n_elements) { // the same as the Ray-Driven one for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int ia = thread_id; double cos_angle, sin_angle; // we use nx for both nx and ny here, so please make sure nx==ny double* pos_ptr = pos + 2*ia*param.nx; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // begin calculation double angle = param.orbit_start + ia * param.orbit; sin_angle = sin(angle); cos_angle = cos(angle); bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex; weights[ia] = fabs(b_usex ? param.dy / cos_angle : param.dx / sin_angle); double temp1 = param.dx * cos_angle / param.ds; double temp2 = -param.dy * sin_angle / param.ds; double offset1 = b_usex ? temp1 : temp2; double offset2 = b_usex ? temp2 : temp1; double begin = cents - centx * temp1 - centy * temp2; // again, we use nx here for both nx and ny to reduce code amount for (unsigned int i = 0; i < param.nx; ++i) { pos_ptr[0] = begin; pos_ptr[1] = offset1; begin += offset2; pos_ptr += 2; } } return; } bool ParallelProjection2DDisDrivenGradPrep::calculate_on_gpu(double* weights, double* pos, int* usex, hipStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenGradPrepKernel) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, weights, pos, usex, param_, param_.na); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <typename T> __global__ void ParallelProjection2DDisDrivenGradKernel(const T* proj, T* img, const double* weights, const double* pos, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int iy = thread_id / param.nx; int ix = thread_id % param.nx; unsigned int ia, is; bool b_usex; double begin, offset, mid, left, right, temp; int i_hori, i_vert; const double* weight_ptr = weights; const double* pos_ptr = pos; const int* usex_ptr = usex; const T* proj_ptr = proj; // begin calculation double sum = 0.0, tempsum, length; for (ia = 0; ia < param.na; ++ia) { b_usex = *usex_ptr; i_hori = b_usex ? ix : iy; i_vert = b_usex ? iy : ix; // calculate corresponding ray range begin = pos_ptr[i_vert<<1]; offset = pos_ptr[(i_vert<<1) | 1]; mid = begin + offset * i_hori; left = i_hori==0 ? mid : mid-offset; right = i_hori==param.nx-1 ? 
mid : mid+offset; // make sure left <= right if (left > right) { temp = left; left = right; right = temp; } // accumulate values within the range tempsum = 0.0; for (is = ceil(left - 0.5); is <= floor(right + 0.5); ++is) { length = MIN(static_cast<double>(is) + 0.5, right) - MAX(static_cast<double>(is) - 0.5, left); tempsum += length * proj_ptr[is] / 2; } sum += tempsum * (*weight_ptr); // next angle ++usex_ptr; ++weight_ptr; proj_ptr += param.ns; pos_ptr += 2*param.nx; } // write to image img[ix + iy*param.nx] = sum; } return; } template <> bool ParallelProjection2DDisDrivenGrad<float>::calculate_on_gpu(const float* proj, float* img, const double* weights, const double* pos, const int* usex, hipStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenGradKernel<float>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, weights, pos, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } template <> bool ParallelProjection2DDisDrivenGrad<double>::calculate_on_gpu(const double* proj, double* img, const double* weights, const double* pos, const int* usex, hipStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); hipLaunchKernelGGL(( ParallelProjection2DDisDrivenGradKernel<double>) , dim3(config.block_count), dim3(config.thread_per_block), 0, stream, proj, img, weights, pos, usex, param_, n_elements); hipError_t err = hipDeviceSynchronize(); return err==hipSuccess; } //#endif } // namespace ct_recon
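The distance-driven kernels above weight each pixel by the overlap between the pixel cell [ix, ix+1] and the detector footprint [left, right], accumulated as length = MIN(ix+1, right) - MAX(ix, left). The small host helper below makes that interval-overlap computation explicit; the clamp to zero is only added for safety here, since the kernel's loop bounds already keep the overlap non-negative. The function name and the worked numbers are illustrative only.

#include <algorithm>
#include <cstdio>

// Overlap length between a pixel cell [a0, a1] and a detector footprint
// [b0, b1] -- the "length" term accumulated by ParallelProjection2DDisDrivenKernel.
static double overlapLength(double a0, double a1, double b0, double b1)
{
    return std::max(0.0, std::min(a1, b1) - std::max(a0, b0));
}

int main()
{
    // Footprint [1.3, 2.7] against pixel cells [1,2] and [2,3]: 0.7 each.
    std::printf("%f %f\n", overlapLength(1.0, 2.0, 1.3, 2.7),
                           overlapLength(2.0, 3.0, 1.3, 2.7));
    return 0;
}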
7f6c13c9a1785262b960f555b2b4e487258764d5.cu
/* * @Description: GPU implementation of fp_par_2d.h * @Author: Tianling Lyu * @Date: 2019-11-13 14:42:30 * @LastEditors: Tianling Lyu * @LastEditTime: 2019-12-09 11:50:37 */ #include "include/fp_par_2d.h" #define _USE_MATH_DEFINES #include <cmath> #include "cuda/cuda_common.h" #ifndef M_PI #define M_PI 3.14159265358979323846264338327950288 #define M_PI_4 M_PI/4 #endif #ifndef MAX #define MAX(x, y) ((x) > (y) ? (x) : (y)) #endif #ifndef MIN #define MIN(x, y) ((x) > (y) ? (y) : (x)) #endif namespace ct_recon { //#if USE_CUDA __global__ void ParallelProjection2DRayCastingPrepareKernel(double* sincostbl, double* begins, int* nsteps, const ParallelProjection2DParam param, const double step_size, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; // useful constants const double x_center = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double y_center = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double fov_squ = param.fov*param.fov; const double s_begin = -(static_cast<double>(param.ns-1) / 2.0 + param.offset_s) * param.ds; const bool usex = sin_angle > (1.0/1.414); const double step_x = step_size * sin_angle; const double step_y = step_size * cos_angle; // variables double s = s_begin, x1, y1, x2, y2; double half_subtense_squ, half_subtense; double* begin_ptr = begins + 2*ia*param.ns; int* nstep_ptr = nsteps + ia*param.ns; for (int is = 0; is < param.ns; ++is) { half_subtense_squ = fov_squ - s*s; if (half_subtense_squ > 0) { half_subtense = sqrt(half_subtense_squ); // intersection points x1 = (s*cos_angle - half_subtense*sin_angle) / param.dx + x_center; y1 = -(s*sin_angle + half_subtense*cos_angle) / param.dy + y_center; x2 = (s*cos_angle + half_subtense*sin_angle) / param.dx + x_center; y2 = -(s*sin_angle - half_subtense*cos_angle) / param.dy + y_center; if (usex) *nstep_ptr = static_cast<int>((x2 - x1) / step_x); else *nstep_ptr = static_cast<int>((y2 - y1) / step_y); // store results begin_ptr[0] = x1; begin_ptr[1] = y1; } else { // not intersected with FoV *nstep_ptr = 0; begin_ptr[0] = 0; begin_ptr[1] = 0; } // next channel begin_ptr += 2; ++nstep_ptr; s += param.ds; } } return; } bool ParallelProjection2DRayCastingPrepare::calculate_on_gpu(double* sincostbl, double* begins, int* nsteps, cudaStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); ParallelProjection2DRayCastingPrepareKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (sincostbl, begins, nsteps, param_, step_size_, param_.na); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelProjection2DRayCastingKernel(const T* img, T* proj, const double* sincostbl, const double* begins, const int* nsteps, const ParallelProjection2DParam param, const double step_size, const int n_elements) { for (int thread_id : CudaGridRangeX(n_elements)) { int ia = thread_id / param.ns; int is = thread_id % param.ns; double sin_angle = sincostbl[2*ia]; double cos_angle = sincostbl[2*ia + 1]; double step_x = step_size * sin_angle; double step_y = step_size * cos_angle; int pos = is + ia * param.ns; double x = begins[2*pos]; double y = begins[2*pos + 1]; double sum = 0; int ix1, ix2, iy1, iy2; double u, v; for (int ray_index = 0; ray_index < nsteps[pos]; ++ray_index) { if (x >= 0 && x <= 
param.nx-1 && y >= 0 && y <= param.ny-1){ // 2-D linear interpolation ix1 = floor(x); ix2 = ceil(x); // use ceil instead of ix1+1 to suit to x==nx-1 u = x - ix1; iy1 = floor(y); iy2 = ceil(y); v = y - iy1; sum += (1-v) * ((1-u)*img[ix1+iy1*param.nx] + u*img[ix2+iy1*param.nx]) + v * ((1-u)*img[ix1+iy2*param.nx] + u*img[ix2+iy2*param.nx]); } x += step_x; y += step_y; } proj[pos] = sum * step_size * param.dx; } return; } template <> bool ParallelProjection2DRayCasting<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* begins, const int* nsteps, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayCastingKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (img, proj, sincostbl, begins, nsteps, param_, step_size_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool ParallelProjection2DRayCasting<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* begins, const int* nsteps, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayCastingKernel<double> <<<config.block_count, config.thread_per_block, 0, stream>>> (img, proj, sincostbl, begins, nsteps, param_, step_size_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } __global__ void ParallelProjection2DRayDrivenPrepareKernel(double* sincostbl, double* beginoffset, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id, is; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; while (angle < 0) angle += 2 * M_PI; while (angle >= 2 * M_PI) angle -= 2 * M_PI; bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex ? 
1 : 0; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // variables double begin, offset, offset2; double* begin_ptr = beginoffset + 2*ia*param.ns; if (b_usex) { offset = param.dy * sin_angle / (cos_angle * param.dx); offset2 = param.ds / (cos_angle * param.dx); begin = centx - centy*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; // next channel begin += offset2; begin_ptr += 2; } } else { offset = param.dx * cos_angle / (sin_angle * param.dy); offset2 = -param.ds / (sin_angle * param.dy); begin = centy - centx*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; // next channel begin += offset2; begin_ptr += 2; } } } return; } bool ParallelProjection2DRayDrivenPrepare::calculate_on_gpu(double* sincostbl, double* beginoffset, int* usex, cudaStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); ParallelProjection2DRayDrivenPrepareKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (sincostbl, beginoffset, usex, param_, param_.na); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelProjection2DRayDrivenKernel(const T* img, T* proj, const double* sincostbl, const double* beginoffset, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id / param.ns; int is = thread_id % param.ns; bool b_usex = usex[ia]; int pos = is + ia * param.ns; double begin = beginoffset[2*pos]; double offset = beginoffset[2*pos+1]; T sum = 0; if (b_usex) { double x = begin, u; int ix1, ix2, iy; for (iy = 0; iy < param.ny; ++iy) { if (x >= 0 && x <= param.nx-1) { // linear interpolation ix1 = static_cast<int>(floor(x)); ix2 = static_cast<int>(ceil(x)); u = x - ix1; sum += (1-u) * img[ix1 + iy*param.nx] + u * img[ix2 + iy*param.nx]; } // next row x += offset; } proj[pos] = sum * fabs(param.dy / sincostbl[2*ia + 1]); } else { double y = begin, u; int ix, iy1, iy2; for (ix = 0; ix < param.nx; ++ix) { if (y >= 0 && y <= param.ny) { //linear interpolation iy1 = static_cast<int>(floor(y)); iy2 = static_cast<int>(ceil(y)); u = y - iy1; sum += (1-u) * img[ix + iy1*param.nx] + u * img[ix + iy2*param.nx]; } // next column y += offset; } proj[pos] = sum * fabs(param.dx / sincostbl[2*ia]); } } return; } template <> bool ParallelProjection2DRayDriven<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* beginoffset, const int* usex, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayDrivenKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (img, proj, sincostbl, beginoffset, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool ParallelProjection2DRayDriven<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* beginoffset, const int* usex, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayDrivenKernel<double> <<<config.block_count, config.thread_per_block, 0, 
stream>>> (img, proj, sincostbl, beginoffset, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } __global__ void ParallelProjection2DRayDrivenGradPrepKernel(double* weights, double* pos, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int ia = thread_id; double cos_angle, sin_angle; // we use nx for both nx and ny here, so please make sure nx==ny double* pos_ptr = pos + 2*ia*param.nx; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // begin calculation double angle = param.orbit_start + ia * param.orbit; sin_angle = sin(angle); cos_angle = cos(angle); bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex; weights[ia] = fabs(b_usex ? param.dy / cos_angle : param.dx / sin_angle); double temp1 = param.dx * cos_angle / param.ds; double temp2 = -param.dy * sin_angle / param.ds; double offset1 = b_usex ? temp1 : temp2; double offset2 = b_usex ? temp2 : temp1; double begin = cents - centx * temp1 - centy * temp2; // again, we use nx here for both nx and ny to reduce code amount for (unsigned int i = 0; i < param.nx; ++i) { pos_ptr[0] = begin; pos_ptr[1] = offset1; begin += offset2; pos_ptr += 2; } } return; } bool ParallelProjection2DRayDrivenGradPrep::calculate_on_gpu(double* weights, double* pos, int* usex, cudaStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); ParallelProjection2DRayDrivenGradPrepKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (weights, pos, usex, param_, param_.na); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelProjection2DRayDrivenGradKernel(const T* proj, T* img, const double* weights, const double* pos, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int iy = thread_id / param.nx; int ix = thread_id % param.nx; unsigned int ia, is; bool b_usex; double begin, offset, mid, left, right, temp; int i_hori, i_vert; const double* weight_ptr = weights; const double* pos_ptr = pos; const int* usex_ptr = usex; const T* proj_ptr = proj; // begin calculation double sum = 0.0, tempsum; for (ia = 0; ia < param.na; ++ia) { b_usex = *usex_ptr; i_hori = b_usex ? ix : iy; i_vert = b_usex ? iy : ix; // calculate corresponding ray range begin = pos_ptr[i_vert<<1]; offset = pos_ptr[(i_vert<<1) | 1]; mid = begin + offset * i_hori; left = i_hori==0 ? mid : mid-offset; right = i_hori==param.nx-1 ? 
mid : mid+offset; // make sure left <= right if (left > right) { temp = left; left = right; right = temp; } // accumulate values within the range tempsum = 0.0; for (is = ceil(left); is <= right; ++is) { tempsum += (1-abs((mid-static_cast<double>(is)) / offset)) * proj_ptr[is]; } sum += tempsum * (*weight_ptr); // next angle ++usex_ptr; ++weight_ptr; proj_ptr += param.ns; pos_ptr += 2*param.nx; } // write to image img[ix + iy*param.nx] = sum; } return; } template <> bool ParallelProjection2DRayDrivenGrad<float>::calculate_on_gpu(const float* proj, float* img, const double* weights, const double* pos, const int* usex, cudaStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayDrivenGradKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (proj, img, weights, pos, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool ParallelProjection2DRayDrivenGrad<double>::calculate_on_gpu(const double* proj, double* img, const double* weights, const double* pos, const int* usex, cudaStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DRayDrivenGradKernel<double> <<<config.block_count, config.thread_per_block, 0, stream>>> (proj, img, weights, pos, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } __global__ void ParallelProjection2DDisDrivenPrepKernel(double* sincostbl, double* beginoffset, int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id, is; double angle = param.orbit_start + ia * param.orbit; double sin_angle = sin(angle); double cos_angle = cos(angle); sincostbl[2*ia] = sin_angle; sincostbl[2*ia + 1] = cos_angle; while (angle < 0) angle += 2 * M_PI; while (angle >= 2 * M_PI) angle -= 2 * M_PI; bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex ? 
1 : 0; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // variables double begin, offset, offset2; double* begin_ptr = beginoffset + 3*ia*param.ns; if (b_usex) { offset = param.dy * sin_angle / (cos_angle * param.dx); offset2 = param.ds / (cos_angle * param.dx); begin = centx - centy*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; begin_ptr[2] = offset2 / 2; // next channel begin += offset2; begin_ptr += 3; } } else { offset = param.dx * cos_angle / (sin_angle * param.dy); offset2 = -param.ds / (sin_angle * param.dy); begin = centy - centx*offset - cents*offset2; for (is = 0; is < param.ns; ++is) { begin_ptr[0] = begin; begin_ptr[1] = offset; begin_ptr[2] = offset2 / 2; // next channel begin += offset2; begin_ptr += 3; } } } return; } bool ParallelProjection2DDisDrivenPrep::calculate_on_gpu(double* sincostbl, double* beginoffset, int* usex, cudaStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); ParallelProjection2DDisDrivenPrepKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (sincostbl, beginoffset, usex, param_, param_.na); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelProjection2DDisDrivenKernel(const T* img, T* proj, const double* sincostbl, const double* beginoffset, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { int ia = thread_id / param.ns; int ix, iy; bool b_usex = usex[ia]; double begin = beginoffset[3*thread_id]; double offset = beginoffset[3*thread_id+1]; double offset2 = beginoffset[3*thread_id+2] / 2; double left = begin - offset2; double right = begin + offset2; if (left > right) { double temp = left; left = right; right = temp; } double length; T sum = 0, tsum, lsum; if (b_usex) { for (iy = 0; iy < param.ny; ++iy) { tsum = 0; lsum = 0; for (ix = floor(left); ix < ceil(right); ++ix) { if (ix < 0 || ix >= param.nx-1) continue; length = MIN(ix+1, right) - MAX(ix, left); tsum += length * (img[ix+iy*param.nx] + img[ix+1+iy*param.nx]) / 2; lsum += length; } if (lsum > 0) sum += tsum / lsum; // next row left += offset; right += offset; } proj[thread_id] = sum * fabs(param.dy / sincostbl[2*ia + 1]); } else { for (ix = 0; ix < param.nx; ++ix) { tsum = 0; lsum = 0; for (iy = floor(left); iy < ceil(right); ++iy) { if (iy < 0 || iy >= param.ny-1) continue; length = MIN(iy+1, right) - MAX(iy, left); tsum += length * (img[ix+iy*param.nx] + img[ix+(iy+1)*param.nx]) / 2; lsum += length; } if (lsum > 0) sum += tsum / lsum; // next row left += offset; right += offset; } proj[thread_id] = sum * fabs(param.dx / sincostbl[2*ia]); } } return; } template <> bool ParallelProjection2DDisDriven<float>::calculate_on_gpu(const float* img, float* proj, const double* sincostbl, const double* beginoffset, const int* usex, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DDisDrivenKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (img, proj, sincostbl, beginoffset, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool 
ParallelProjection2DDisDriven<double>::calculate_on_gpu(const double* img, double* proj, const double* sincostbl, const double* beginoffset, const int* usex, cudaStream_t stream) const { int n_elements = param_.na*param_.ns; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DDisDrivenKernel<double> <<<config.block_count, config.thread_per_block, 0, stream>>> (img, proj, sincostbl, beginoffset, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } __global__ void ParallelProjection2DDisDrivenGradPrepKernel(double* weights, double* pos, int* usex, const ParallelProjection2DParam param, const int n_elements) { // the same as the Ray-Driven one for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int ia = thread_id; double cos_angle, sin_angle; // we use nx for both nx and ny here, so please make sure nx==ny double* pos_ptr = pos + 2*ia*param.nx; // useful constants const double centx = static_cast<double>(param.nx-1) / 2.0 + param.offset_x; const double centy = static_cast<double>(param.ny-1) / 2.0 + param.offset_y; const double cents = static_cast<double>(param.ns-1) / 2.0 + param.offset_s; // begin calculation double angle = param.orbit_start + ia * param.orbit; sin_angle = sin(angle); cos_angle = cos(angle); bool b_usex = !((angle >= M_PI_4 && angle < 3*M_PI_4) || (angle >= 5*M_PI_4 && angle < 7*M_PI_4)); usex[ia] = b_usex; weights[ia] = fabs(b_usex ? param.dy / cos_angle : param.dx / sin_angle); double temp1 = param.dx * cos_angle / param.ds; double temp2 = -param.dy * sin_angle / param.ds; double offset1 = b_usex ? temp1 : temp2; double offset2 = b_usex ? temp2 : temp1; double begin = cents - centx * temp1 - centy * temp2; // again, we use nx here for both nx and ny to reduce code amount for (unsigned int i = 0; i < param.nx; ++i) { pos_ptr[0] = begin; pos_ptr[1] = offset1; begin += offset2; pos_ptr += 2; } } return; } bool ParallelProjection2DDisDrivenGradPrep::calculate_on_gpu(double* weights, double* pos, int* usex, cudaStream_t stream) const { CudaLaunchConfig config = GetCudaLaunchConfig(param_.na); ParallelProjection2DDisDrivenGradPrepKernel <<<config.block_count, config.thread_per_block, 0, stream>>> (weights, pos, usex, param_, param_.na); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <typename T> __global__ void ParallelProjection2DDisDrivenGradKernel(const T* proj, T* img, const double* weights, const double* pos, const int* usex, const ParallelProjection2DParam param, const int n_elements) { for (int thread_id : CudaGridRangeX<int>(n_elements)) { // declare variables int iy = thread_id / param.nx; int ix = thread_id % param.nx; unsigned int ia, is; bool b_usex; double begin, offset, mid, left, right, temp; int i_hori, i_vert; const double* weight_ptr = weights; const double* pos_ptr = pos; const int* usex_ptr = usex; const T* proj_ptr = proj; // begin calculation double sum = 0.0, tempsum, length; for (ia = 0; ia < param.na; ++ia) { b_usex = *usex_ptr; i_hori = b_usex ? ix : iy; i_vert = b_usex ? iy : ix; // calculate corresponding ray range begin = pos_ptr[i_vert<<1]; offset = pos_ptr[(i_vert<<1) | 1]; mid = begin + offset * i_hori; left = i_hori==0 ? mid : mid-offset; right = i_hori==param.nx-1 ? 
mid : mid+offset; // make sure left <= right if (left > right) { temp = left; left = right; right = temp; } // accumulate values within the range tempsum = 0.0; for (is = ceil(left - 0.5); is <= floor(right + 0.5); ++is) { length = MIN(static_cast<double>(is) + 0.5, right) - MAX(static_cast<double>(is) - 0.5, left); tempsum += length * proj_ptr[is] / 2; } sum += tempsum * (*weight_ptr); // next angle ++usex_ptr; ++weight_ptr; proj_ptr += param.ns; pos_ptr += 2*param.nx; } // write to image img[ix + iy*param.nx] = sum; } return; } template <> bool ParallelProjection2DDisDrivenGrad<float>::calculate_on_gpu(const float* proj, float* img, const double* weights, const double* pos, const int* usex, cudaStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DDisDrivenGradKernel<float> <<<config.block_count, config.thread_per_block, 0, stream>>> (proj, img, weights, pos, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } template <> bool ParallelProjection2DDisDrivenGrad<double>::calculate_on_gpu(const double* proj, double* img, const double* weights, const double* pos, const int* usex, cudaStream_t stream) const { int n_elements = param_.nx*param_.ny; CudaLaunchConfig config = GetCudaLaunchConfig(n_elements); ParallelProjection2DDisDrivenGradKernel<double> <<<config.block_count, config.thread_per_block, 0, stream>>> (proj, img, weights, pos, usex, param_, n_elements); cudaError_t err = cudaDeviceSynchronize(); return err==cudaSuccess; } //#endif } // namespace ct_recon
a1c8c3a859e3d99eda034d6f200989c6a65943a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "mesh_belonging.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/utils.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace MeshBelongingKernels { const real tolerance = 1e-6_r; /// https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm __device__ static inline bool doesRayIntersectTriangle( real3 rayOrigin, real3 rayVector, real3 v0, real3 v1, real3 v2) { real3 edge1, edge2, h, s, q; real a,f,u,v; edge1 = v1 - v0; edge2 = v2 - v0; h = cross(rayVector, edge2); a = dot(edge1, h); if (math::abs(a) < tolerance) return false; f = 1.0_r / a; s = rayOrigin - v0; u = f * (dot(s, h)); if (u < 0.0_r || u > 1.0_r) return false; q = cross(s, edge1); v = f * dot(rayVector, q); if (v < 0.0_r || u + v > 1.0_r) return false; // At this stage we can compute t to find out where the intersection point is on the line. real t = f * dot(edge2, q); if (t > tolerance) // ray intersection return true; else return false; // This means that there is a line intersection but not a ray intersection. } __device__ static inline real3 fetchPosition(const real4 *vertices, int i) { auto v = vertices[i]; return {v.x, v.y, v.z}; } /** * One warp works on one particle */ __device__ static inline BelongingTags oneParticleInsideMesh(int pid, real3 r, int objId, const real3 com, const MeshView mesh, const real4* vertices) { // Work in obj reference frame for simplicity r = r - com; // shoot 3 rays in different directions, count intersections constexpr int nRays = 3; constexpr real3 rays[nRays] = { {0,1,0}, {0,1,0}, {0,1,0} }; int counters[nRays] = {0, 0, 0}; for (int i = laneId(); i < mesh.ntriangles; i += warpSize) { int3 trid = mesh.triangles[i]; real3 v0 = fetchPosition(vertices, objId*mesh.nvertices + trid.x) - com; real3 v1 = fetchPosition(vertices, objId*mesh.nvertices + trid.y) - com; real3 v2 = fetchPosition(vertices, objId*mesh.nvertices + trid.z) - com; for (int c = 0; c < nRays; c++) if (doesRayIntersectTriangle(r, rays[c], v0, v1, v2)) counters[c]++; } // counter is odd if the particle is inside // however, realing-point precision sometimes yields in errors // so we choose what the majority(!) 
of the rays say int intersecting = 0; for (int c = 0; c < nRays; c++) { counters[c] = warpReduce(counters[c], [] (int a, int b) { return a+b; }); if ( (counters[c] % 2) != 0 ) intersecting++; } if (intersecting > (nRays/2)) return BelongingTags::Inside; else return BelongingTags::Outside; } /** * OVview view is only used to provide # of objects and extent information * Actual data is in \p vertices * @param cinfo is the cell-list sync'd with the target ParticleVector data */ template<int WARPS_PER_OBJ> __global__ void insideMesh(const OVview ovView, const MeshView mesh, const real4 *vertices, CellListInfo cinfo, PVview pvView, BelongingTags* tags) { const int gid = blockIdx.x*blockDim.x + threadIdx.x; const int wid = gid / warpSize; const int objId = wid / WARPS_PER_OBJ; const int locWid = wid % WARPS_PER_OBJ; if (objId >= ovView.nObjects) return; const int3 cidLow = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].low - 0.5_r); const int3 cidHigh = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].high + 0.5_r); const int3 span = cidHigh - cidLow + make_int3(1,1,1); const int totCells = span.x * span.y * span.z; for (int i = locWid; i < totCells; i += WARPS_PER_OBJ) { const int3 cid3 = make_int3( i % span.x, (i/span.x) % span.y, i / (span.x*span.y) ) + cidLow; const int cid = cinfo.encode(cid3); if (cid < 0 || cid >= cinfo.totcells) continue; int pstart = cinfo.cellStarts[cid]; int pend = cinfo.cellStarts[cid+1]; #pragma unroll 3 for (int pid = pstart; pid < pend; pid++) { const Particle p(pvView.readParticle(pid)); auto tag = oneParticleInsideMesh(pid, p.r, objId, ovView.comAndExtents[objId].com, mesh, vertices); // Only tag particles inside, default is outside anyways if (laneId() == 0 && tag != BelongingTags::Outside) tags[pid] = tag; } } } } // namespace MeshBelongingKernels void MeshBelongingChecker::_tagInner(ParticleVector *pv, CellList *cl, hipStream_t stream) { tags_.resize_anew(pv->local()->size()); tags_.clearDevice(stream); auto computeTags = [&](ParticleVectorLocality locality) { ov_->findExtentAndCOM(stream, locality); auto lov = ov_->get(locality); auto view = OVview(ov_, lov); auto vertices = lov->getMeshVertices(stream); auto meshView = MeshView(ov_->mesh.get()); debug("Computing inside/outside tags (against mesh) for %d %s objects '%s' and %d '%s' particles", view.nObjects, getParticleVectorLocalityStr(locality).c_str(), ov_->getCName(), pv->local()->size(), pv->getCName()); constexpr int nthreads = 128; constexpr int warpsPerObject = 1024; SAFE_KERNEL_LAUNCH( MeshBelongingKernels::insideMesh<warpsPerObject>, getNblocks(warpsPerObject*32*view.nObjects, nthreads), nthreads, 0, stream, view, meshView, reinterpret_cast<real4*>(vertices->devPtr()), cl->cellInfo(), cl->getView<PVview>(), tags_.devPtr()); }; computeTags(ParticleVectorLocality::Local); computeTags(ParticleVectorLocality::Halo); } } // namespace mirheo
a1c8c3a859e3d99eda034d6f200989c6a65943a6.cu
#include "mesh_belonging.h" #include <mirheo/core/celllist.h> #include <mirheo/core/pvs/object_vector.h> #include <mirheo/core/pvs/views/ov.h> #include <mirheo/core/rigid/utils.h> #include <mirheo/core/utils/kernel_launch.h> #include <mirheo/core/utils/quaternion.h> namespace mirheo { namespace MeshBelongingKernels { const real tolerance = 1e-6_r; /// https://en.wikipedia.org/wiki/M%C3%B6ller%E2%80%93Trumbore_intersection_algorithm __device__ static inline bool doesRayIntersectTriangle( real3 rayOrigin, real3 rayVector, real3 v0, real3 v1, real3 v2) { real3 edge1, edge2, h, s, q; real a,f,u,v; edge1 = v1 - v0; edge2 = v2 - v0; h = cross(rayVector, edge2); a = dot(edge1, h); if (math::abs(a) < tolerance) return false; f = 1.0_r / a; s = rayOrigin - v0; u = f * (dot(s, h)); if (u < 0.0_r || u > 1.0_r) return false; q = cross(s, edge1); v = f * dot(rayVector, q); if (v < 0.0_r || u + v > 1.0_r) return false; // At this stage we can compute t to find out where the intersection point is on the line. real t = f * dot(edge2, q); if (t > tolerance) // ray intersection return true; else return false; // This means that there is a line intersection but not a ray intersection. } __device__ static inline real3 fetchPosition(const real4 *vertices, int i) { auto v = vertices[i]; return {v.x, v.y, v.z}; } /** * One warp works on one particle */ __device__ static inline BelongingTags oneParticleInsideMesh(int pid, real3 r, int objId, const real3 com, const MeshView mesh, const real4* vertices) { // Work in obj reference frame for simplicity r = r - com; // shoot 3 rays in different directions, count intersections constexpr int nRays = 3; constexpr real3 rays[nRays] = { {0,1,0}, {0,1,0}, {0,1,0} }; int counters[nRays] = {0, 0, 0}; for (int i = laneId(); i < mesh.ntriangles; i += warpSize) { int3 trid = mesh.triangles[i]; real3 v0 = fetchPosition(vertices, objId*mesh.nvertices + trid.x) - com; real3 v1 = fetchPosition(vertices, objId*mesh.nvertices + trid.y) - com; real3 v2 = fetchPosition(vertices, objId*mesh.nvertices + trid.z) - com; for (int c = 0; c < nRays; c++) if (doesRayIntersectTriangle(r, rays[c], v0, v1, v2)) counters[c]++; } // counter is odd if the particle is inside // however, realing-point precision sometimes yields in errors // so we choose what the majority(!) 
of the rays say int intersecting = 0; for (int c = 0; c < nRays; c++) { counters[c] = warpReduce(counters[c], [] (int a, int b) { return a+b; }); if ( (counters[c] % 2) != 0 ) intersecting++; } if (intersecting > (nRays/2)) return BelongingTags::Inside; else return BelongingTags::Outside; } /** * OVview view is only used to provide # of objects and extent information * Actual data is in \p vertices * @param cinfo is the cell-list sync'd with the target ParticleVector data */ template<int WARPS_PER_OBJ> __global__ void insideMesh(const OVview ovView, const MeshView mesh, const real4 *vertices, CellListInfo cinfo, PVview pvView, BelongingTags* tags) { const int gid = blockIdx.x*blockDim.x + threadIdx.x; const int wid = gid / warpSize; const int objId = wid / WARPS_PER_OBJ; const int locWid = wid % WARPS_PER_OBJ; if (objId >= ovView.nObjects) return; const int3 cidLow = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].low - 0.5_r); const int3 cidHigh = cinfo.getCellIdAlongAxes(ovView.comAndExtents[objId].high + 0.5_r); const int3 span = cidHigh - cidLow + make_int3(1,1,1); const int totCells = span.x * span.y * span.z; for (int i = locWid; i < totCells; i += WARPS_PER_OBJ) { const int3 cid3 = make_int3( i % span.x, (i/span.x) % span.y, i / (span.x*span.y) ) + cidLow; const int cid = cinfo.encode(cid3); if (cid < 0 || cid >= cinfo.totcells) continue; int pstart = cinfo.cellStarts[cid]; int pend = cinfo.cellStarts[cid+1]; #pragma unroll 3 for (int pid = pstart; pid < pend; pid++) { const Particle p(pvView.readParticle(pid)); auto tag = oneParticleInsideMesh(pid, p.r, objId, ovView.comAndExtents[objId].com, mesh, vertices); // Only tag particles inside, default is outside anyways if (laneId() == 0 && tag != BelongingTags::Outside) tags[pid] = tag; } } } } // namespace MeshBelongingKernels void MeshBelongingChecker::_tagInner(ParticleVector *pv, CellList *cl, cudaStream_t stream) { tags_.resize_anew(pv->local()->size()); tags_.clearDevice(stream); auto computeTags = [&](ParticleVectorLocality locality) { ov_->findExtentAndCOM(stream, locality); auto lov = ov_->get(locality); auto view = OVview(ov_, lov); auto vertices = lov->getMeshVertices(stream); auto meshView = MeshView(ov_->mesh.get()); debug("Computing inside/outside tags (against mesh) for %d %s objects '%s' and %d '%s' particles", view.nObjects, getParticleVectorLocalityStr(locality).c_str(), ov_->getCName(), pv->local()->size(), pv->getCName()); constexpr int nthreads = 128; constexpr int warpsPerObject = 1024; SAFE_KERNEL_LAUNCH( MeshBelongingKernels::insideMesh<warpsPerObject>, getNblocks(warpsPerObject*32*view.nObjects, nthreads), nthreads, 0, stream, view, meshView, reinterpret_cast<real4*>(vertices->devPtr()), cl->cellInfo(), cl->getView<PVview>(), tags_.devPtr()); }; computeTags(ParticleVectorLocality::Local); computeTags(ParticleVectorLocality::Halo); } } // namespace mirheo
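The inside/outside test above uses the crossing-number rule: a point lies inside a closed mesh when a ray shot from it crosses the surface an odd number of times, with three rays voting to absorb floating-point errors. A minimal host-side sketch of the same rule for one ray (plain C++ stand-ins for Mirheo's real3 types; not the Mirheo API):

// Hedged host-side sketch of the even-odd containment rule used by oneParticleInsideMesh.
#include <array>
#include <cmath>
#include <vector>

struct V3 { float x, y, z; };
static V3    sub(V3 a, V3 b)   { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
static V3    cross(V3 a, V3 b) { return {a.y*b.z - a.z*b.y, a.z*b.x - a.x*b.z, a.x*b.y - a.y*b.x}; }
static float dot(V3 a, V3 b)   { return a.x*b.x + a.y*b.y + a.z*b.z; }

// Moller-Trumbore ray/triangle test, same structure as doesRayIntersectTriangle above.
static bool intersects(V3 o, V3 d, V3 v0, V3 v1, V3 v2) {
    const float eps = 1e-6f;
    V3 e1 = sub(v1, v0), e2 = sub(v2, v0), h = cross(d, e2);
    float a = dot(e1, h);
    if (std::fabs(a) < eps) return false;
    float f = 1.0f / a;
    V3 s = sub(o, v0);
    float u = f * dot(s, h);
    if (u < 0.f || u > 1.f) return false;
    V3 q = cross(s, e1);
    float v = f * dot(d, q);
    if (v < 0.f || u + v > 1.f) return false;
    return f * dot(e2, q) > eps;             // ray (not line) intersection
}

// A point is inside a closed triangle mesh iff its ray crosses the surface an odd number of times.
static bool insideMesh(V3 p, const std::vector<std::array<V3, 3>>& triangles) {
    V3 ray{0.f, 1.f, 0.f};
    int crossings = 0;
    for (const auto& t : triangles)
        if (intersects(p, ray, t[0], t[1], t[2])) ++crossings;
    return (crossings % 2) == 1;
}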
c46a0a1d6f71f32a9365aeaa7b9f529432c5d880.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <hipcub/hipcub.hpp> #include <rocblas.h> #include <hip/hip_fp16.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "range_impl.h" using namespace onnxruntime::cuda; namespace onnxruntime { namespace cuda { template <typename T> __global__ void RangeKernel(const T start, const T delta, const int count, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < count) { output[index] = start + delta * index; } } template <typename T> bool RangeImpl(const T start, const T delta, const int count, T* output) { constexpr int block_size = 256; int grid_size = (count + block_size - 1) / block_size; hipLaunchKernelGGL(( RangeKernel<T>), dim3(grid_size), dim3(block_size), 0, 0, start, delta, count, output); return CUDA_CALL(hipPeekAtLastError()); } #define SPECIALIZED_IMPL(T) \ template bool RangeImpl<T>(const T start, const T delta, const int count, T* output); SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(int64_t) SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) } // namespace cuda } // namespace onnxruntime
c46a0a1d6f71f32a9365aeaa7b9f529432c5d880.cu
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cub/cub.cuh> #include <cublas_v2.h> #include <cuda_fp16.h> #include "core/providers/cuda/cu_inc/common.cuh" #include "core/providers/cuda/cuda_common.h" #include "range_impl.h" using namespace onnxruntime::cuda; namespace onnxruntime { namespace cuda { template <typename T> __global__ void RangeKernel(const T start, const T delta, const int count, T* output) { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index < count) { output[index] = start + delta * index; } } template <typename T> bool RangeImpl(const T start, const T delta, const int count, T* output) { constexpr int block_size = 256; int grid_size = (count + block_size - 1) / block_size; RangeKernel<T><<<grid_size, block_size, 0>>>(start, delta, count, output); return CUDA_CALL(cudaPeekAtLastError()); } #define SPECIALIZED_IMPL(T) \ template bool RangeImpl<T>(const T start, const T delta, const int count, T* output); SPECIALIZED_IMPL(int16_t) SPECIALIZED_IMPL(int32_t) SPECIALIZED_IMPL(int64_t) SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) } // namespace cuda } // namespace onnxruntime
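RangeImpl above fills output[i] = start + delta * i for i in [0, count). A hedged standalone usage sketch (plain CUDA allocation, not onnxruntime code; assumes the RangeKernel defined above is in scope):

// Hedged usage sketch of the Range pattern above (illustrative only).
#include <cuda_runtime.h>
#include <vector>

std::vector<float> make_range(float start, float delta, int count) {
  float* d_out = nullptr;
  cudaMalloc(&d_out, count * sizeof(float));

  constexpr int block_size = 256;
  int grid_size = (count + block_size - 1) / block_size;
  RangeKernel<float><<<grid_size, block_size>>>(start, delta, count, d_out);

  std::vector<float> host(count);
  cudaMemcpy(host.data(), d_out, count * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_out);
  return host;  // e.g. make_range(0.f, 0.5f, 4) -> {0.0, 0.5, 1.0, 1.5}
}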
a15aea579c3f42096aa37a13d2678cc003d90675.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> hipError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int size); hipError_t cuda2GPUDotProduct(int *c, const int *a, const int *b, unsigned int N); int* allocAndAssignMat(int size); __global__ void dot(int *c, const int *a, const int *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] += a[i] * b[i]; } int main() { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); const int N = 10000; // this is the sqrt of the total elements or the len of one side of the square matrix const int* a = allocAndAssignMat(N * N); const int* b = allocAndAssignMat(N * N); int* c = (int*)malloc((N * N) * sizeof(int)); for (int i = 0; i < N * N; i++) { c[i] = 0; } int mySum = 0; hipEventRecord(start); // Add vectors in parallel. hipError_t cudaStatus = cuda2GPUDotProduct(c, a, b, N); if (cudaStatus != hipSuccess) { fprintf(stderr, "cudaDotProduct failed!"); return 1; } hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); for (int i = 0; i < N*N; i++) { //printf("%d ", c[i]); mySum += c[i]; } //Results printf("Size of N*N: %d \nResult: %d \nTime in kernel %f", N * N, mySum, milliseconds); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } hipError_t cuda2GPUDotProduct(int *c, const int *a, const int *b, unsigned int N) { int size = N*N; int subSize = N*(N/2); int* dev_c_0; int* dev_a_0; int* dev_c_1; int* dev_a_1; int* dev_b; hipError_t cudaStatus; cudaStatus = hipSetDevice(0); // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c_0, subSize * sizeof(int)); // allocating the space on the gpu if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a_0, subSize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpyAsync(dev_a_0, &a[0], subSize * sizeof(int), hipMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpyAsync(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( dot), dim3((subSize+255)/256), dim3(256), 0, 0, dev_c_0, dev_a_0, dev_b); // execution configuration - cudaStatus = hipSetDevice(1); // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = hipMalloc((void**)&dev_c_1, subSize * sizeof(int)); // allocating the space on the gpu if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a_1, subSize * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpyAsync(dev_a_1, &a[subSize], subSize * sizeof(int), hipMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpyAsync(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( dot), dim3((subSize+255)/256), dim3(256), 0, 0, dev_c_1, dev_a_1, dev_b); // execution configuration - cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpyAsync(&c[0], dev_c_0, subSize * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpyAsync(&c[subSize], dev_c, subSize * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after getting result!\n", cudaStatus); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. hipError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int N) { int size = N * N; int *dev_a; int *dev_b; int *dev_c; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); // allocating the space on the gpu if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpyAsync(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpyAsync(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. 
hipLaunchKernelGGL(( dot), dim3((size+255)/256), dim3(256), 0, 0, dev_c, dev_a, dev_b); // execution configuration - // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpyAsync(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; } int* allocAndAssignMat(int size) { /* This function takes in the size of the matrix (N*N) and returns a pointer with appropriate memory allocated as well as filled with values @params: int size @returns: int* ptr */ int* ptr = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { ptr[i] = 2; } return ptr; }
a15aea579c3f42096aa37a13d2678cc003d90675.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> cudaError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int size); cudaError_t cuda2GPUDotProduct(int *c, const int *a, const int *b, unsigned int N); int* allocAndAssignMat(int size); __global__ void dot(int *c, const int *a, const int *b) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] += a[i] * b[i]; } int main() { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); const int N = 10000; // this is the sqrt of the total elements or the len of one side of the square matrix const int* a = allocAndAssignMat(N * N); const int* b = allocAndAssignMat(N * N); int* c = (int*)malloc((N * N) * sizeof(int)); for (int i = 0; i < N * N; i++) { c[i] = 0; } int mySum = 0; cudaEventRecord(start); // Add vectors in parallel. cudaError_t cudaStatus = cuda2GPUDotProduct(c, a, b, N); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDotProduct failed!"); return 1; } cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); for (int i = 0; i < N*N; i++) { //printf("%d ", c[i]); mySum += c[i]; } //Results printf("Size of N*N: %d \nResult: %d \nTime in kernel %f", N * N, mySum, milliseconds); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } cudaError_t cuda2GPUDotProduct(int *c, const int *a, const int *b, unsigned int N) { int size = N*N; int subSize = N*(N/2); int* dev_c_0; int* dev_a_0; int* dev_c_1; int* dev_a_1; int* dev_b; cudaError_t cudaStatus; cudaStatus = cudaSetDevice(0); // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c_0, subSize * sizeof(int)); // allocating the space on the gpu if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a_0, subSize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpyAsync(dev_a_0, &a[0], subSize * sizeof(int), cudaMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpyAsync(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. dot<<<(subSize+255)/256, 256>>>(dev_c_0, dev_a_0, dev_b); // execution configuration - cudaStatus = cudaSetDevice(1); // Allocate GPU buffers for three vectors (two input, one output) . 
cudaStatus = cudaMalloc((void**)&dev_c_1, subSize * sizeof(int)); // allocating the space on the gpu if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a_1, subSize * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpyAsync(dev_a_1, &a[subSize], subSize * sizeof(int), cudaMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpyAsync(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. dot<<<(subSize+255)/256, 256>>>(dev_c_1, dev_a_1, dev_b); // execution configuration - cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpyAsync(&c[0], dev_c_0, subSize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpyAsync(&c[subSize], dev_c, subSize * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after getting result!\n", cudaStatus); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } // Helper function for using CUDA to add vectors in parallel. cudaError_t cudaDotProduct(int *c, const int *a, const int *b, unsigned int N) { int size = N * N; int *dev_a; int *dev_b; int *dev_c; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); // allocating the space on the gpu if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. 
cudaStatus = cudaMemcpyAsync(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); // moving the data to the gpu counterpart not c as that is results if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpyAsync(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. dot<<<(size+255)/256, 256>>>(dev_c, dev_a, dev_b); // execution configuration - // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpyAsync(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; } int* allocAndAssignMat(int size) { /* This function takes in the size of the matrix (N*N) and returns a pointer with appropriate memory allocated as well as filled with values @params: int size @returns: int* ptr */ int* ptr = (int*)malloc(size * sizeof(int)); for (int i = 0; i < size; i++) { ptr[i] = 2; } return ptr; }
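In cuda2GPUDotProduct above, the second copy-back and the cleanup reference dev_c and dev_a, which are declared only in cudaDotProduct, so the two-GPU variant does not compile as written; dev_b is also re-allocated on device 1 through the same pointer, leaking the device-0 copy. A hedged sketch of the presumably intended copy-back and cleanup, using only names the function declares (a sketch of intent, not the author's fix):

// Presumed intent: copy the second half of the result from the buffer computed on GPU 1 ...
cudaStatus = cudaMemcpyAsync(&c[subSize], dev_c_1, subSize * sizeof(int), cudaMemcpyDeviceToHost);

// ... and free each buffer on the device that owns it (a separate dev_b_0/dev_b_1 pair would avoid the leak).
Error:
    cudaSetDevice(0);
    cudaFree(dev_c_0);
    cudaFree(dev_a_0);
    cudaSetDevice(1);
    cudaFree(dev_c_1);
    cudaFree(dev_a_1);
    cudaFree(dev_b);
    return cudaStatus;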
236c2680d9df57096631d804174a50b5f03575c6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layers/relu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope, Dtype upper_bound) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out[index] = out[index] > upper_bound ? upper_bound : out[index]; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) if (!this->layer_param_.relu_param().has_upper_bound()) { hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope); } else { Dtype upper_bound = this->layer_param_.relu_param().upper_bound(); hipLaunchKernelGGL(( ReLUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, top_data, negative_slope, upper_bound); } CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope, Dtype upper_bound) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * (Dtype((in_data[index] > 0) && (in_data[index] < upper_bound)) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) if (!this->layer_param_.relu_param().has_upper_bound()) { hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope); } else { Dtype upper_bound = this->layer_param_.relu_param().upper_bound(); hipLaunchKernelGGL(( ReLUBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff, bottom_data, bottom_diff, negative_slope, upper_bound); } CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
236c2680d9df57096631d804174a50b5f03575c6.cu
#include <algorithm> #include <vector> #include "caffe/layers/relu_layer.hpp" namespace caffe { template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } template <typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope, Dtype upper_bound) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; out[index] = out[index] > upper_bound ? upper_bound : out[index]; } } template <typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) if (!this->layer_param_.relu_param().has_upper_bound()) { ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope); } else { Dtype upper_bound = this->layer_param_.relu_param().upper_bound(); ReLUForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, top_data, negative_slope, upper_bound); } CUDA_POST_KERNEL_CHECK; // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope, Dtype upper_bound) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * (Dtype((in_data[index] > 0) && (in_data[index] < upper_bound)) + (in_data[index] <= 0) * negative_slope); } } template <typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); // NOLINT_NEXT_LINE(whitespace/operators) if (!this->layer_param_.relu_param().has_upper_bound()) { ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope); } else { Dtype upper_bound = this->layer_param_.relu_param().upper_bound(); ReLUBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff, bottom_data, bottom_diff, negative_slope, upper_bound); } CUDA_POST_KERNEL_CHECK; } } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
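The second ReLUForward/ReLUBackward overloads above implement a bounded leaky ReLU: y = min(x > 0 ? x : negative_slope * x, upper_bound), which reduces to ReLU6 when negative_slope = 0 and upper_bound = 6. A hedged host-side reference for checking the kernels (plain C++, not part of the Caffe API):

// Host reference for the bounded leaky ReLU computed by the kernels above (a sketch for verification).
#include <algorithm>

float bounded_leaky_relu(float x, float negative_slope, float upper_bound) {
  float y = x > 0.f ? x : x * negative_slope;   // leaky ReLU
  return std::min(y, upper_bound);              // clip at the upper bound
}

// Matching gradient: pass dy through only where 0 < x < upper_bound, scale by the slope where x <= 0.
float bounded_leaky_relu_grad(float x, float dy, float negative_slope, float upper_bound) {
  return dy * ((x > 0.f && x < upper_bound) + (x <= 0.f) * negative_slope);
}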
e6175b8ce10afc14122bd9b6e1717cd661cf3638.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #define CUDA_CALL(x) do { if((x)!=hipSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) __global__ void setup(hiprandState_t *state) { int id = threadIdx.x + blockIdx.x * 64; hiprand_init(1234, id, 0, &state[id]); } __global__ void generate(hiprandState_t *state, int n, unsigned int* result) { int id = threadIdx.x + blockIdx.x * 64; float x; hiprandState_t localState = state[id]; for(int i = 0; i < n; i++) { x = hiprand_normal(&localState) * 15 + 50; if (x > 0 && x < 100) result[i * 100 + (int)x]++; } state[id] = localState; } int main(int argc, char *argv[]) { int i, j; hiprandState_t *devStates; unsigned int *devResults, *hostResults; int samples = 10000; unsigned int r[100] = {0}; hostResults = (unsigned int *)calloc(64 * 64 * 100, sizeof(int)); CUDA_CALL(hipMalloc((void **)&devResults, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(hipMemset(devResults, 0, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(hipMalloc((void **)&devStates, 64 * 64 * sizeof(hiprandState_t))); hipLaunchKernelGGL(( setup), dim3(64), dim3(64), 0, 0, devStates); hipLaunchKernelGGL(( generate), dim3(64), dim3(64), 0, 0, devStates, samples, devResults); CUDA_CALL(hipMemcpy(hostResults, devResults, 100 * 64 * 64 * sizeof(unsigned int), hipMemcpyDeviceToHost)); for (i = 0; i < 64 * 64; i++) { for (j = 0; j < 100; j++) { r[j] += hostResults[i * 100 + j]; } } printf("x,y\n"); for (i = 0; i < 100; i++) { printf("%d,%d\n", i, r[i]); } }
e6175b8ce10afc14122bd9b6e1717cd661cf3638.cu
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__);\ return EXIT_FAILURE;}} while(0) __global__ void setup(curandState *state) { int id = threadIdx.x + blockIdx.x * 64; curand_init(1234, id, 0, &state[id]); } __global__ void generate(curandState *state, int n, unsigned int* result) { int id = threadIdx.x + blockIdx.x * 64; float x; curandState localState = state[id]; for(int i = 0; i < n; i++) { x = curand_normal(&localState) * 15 + 50; if (x > 0 && x < 100) result[i * 100 + (int)x]++; } state[id] = localState; } int main(int argc, char *argv[]) { int i, j; curandState *devStates; unsigned int *devResults, *hostResults; int samples = 10000; unsigned int r[100] = {0}; hostResults = (unsigned int *)calloc(64 * 64 * 100, sizeof(int)); CUDA_CALL(cudaMalloc((void **)&devResults, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(cudaMemset(devResults, 0, 100 * 64 * 64 * sizeof(unsigned int))); CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 * sizeof(curandState))); setup<<<64, 64>>>(devStates); generate<<<64, 64>>>(devStates, samples, devResults); CUDA_CALL(cudaMemcpy(hostResults, devResults, 100 * 64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost)); for (i = 0; i < 64 * 64; i++) { for (j = 0; j < 100; j++) { r[j] += hostResults[i * 100 + j]; } } printf("x,y\n"); for (i = 0; i < 100; i++) { printf("%d,%d\n", i, r[i]); } }
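In the generate kernel above, each thread draws n samples from curand_normal scaled to N(mean = 50, sigma = 15) and histograms them, but the write goes to result[i * 100 + (int)x], where i is the sample index; since the buffer holds one 100-bin histogram per thread (100 * 64 * 64 entries) and the host sums per-thread histograms afterwards, the intended index is presumably id * 100 + (int)x (as written, i can reach 9999 and the write lands past the allocation). A hedged sketch of the presumed per-thread update, with id, localState, result and n as in the kernel above:

// Presumed per-thread histogram update (a sketch of intent, not the author's code).
for (int i = 0; i < n; i++) {
    float x = curand_normal(&localState) * 15 + 50;   // sample from N(mean = 50, sigma = 15)
    if (x > 0.f && x < 100.f)
        result[id * 100 + (int)x]++;                  // one 100-bin histogram per thread
}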
da59f1979d5093a8d279effbca881924be4b6d06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stereo_cuda.h" #include <stdio.h> struct Problem { public: unsigned char* img1; unsigned char* img2; int height, width, nccWindowSize; float* res; Problem(unsigned char* img1, unsigned char* img2, int height, int width, float* res, int nccWindowSize) { this->img1 = img1; this->img2 = img2; this->height = height; this->width = width; this->res = res; this->nccWindowSize = nccWindowSize; } }; __device__ void getWindowMeanSTDCUDA(Problem* problem, unsigned char* colorArr, int centerRow, int centerCol, float *mean, float *std) { float windowSum[3] = {0.0, 0.0, 0.0}; int windowSize = 0; int halfWindow = problem->nccWindowSize / 2; int colorArrIdx; for (int r = centerRow - halfWindow; r <= centerRow + halfWindow; r++) { for (int c = centerCol - halfWindow; c <= centerCol + halfWindow; c++) { if (r >= 0 && r < problem->height && c >= 0 && c < problem->width) { for (int channel = 0; channel < 3; channel++) { colorArrIdx = r * problem->width * 3 + c * 3 + channel; windowSum[channel] += colorArr[colorArrIdx]; } windowSize++; } } } // Compute average for (int channel = 0; channel < 3; channel++) { mean[channel] = windowSum[channel] / windowSize; } float varSum[3] = {0.0, 0.0, 0.0}; for (int r = centerRow - halfWindow; r <= centerRow + halfWindow; r++) { for (int c = centerCol - halfWindow; c <= centerCol + halfWindow; c++) { if (r >= 0 && r < problem->height && c >= 0 && c < problem->width) { for (int channel = 0; channel < 3; channel++) { colorArrIdx = r * problem->width * 3 + c * 3 + channel; float diff = colorArr[colorArrIdx] - mean[channel]; varSum[channel] += diff * diff; } } } } for (int channel = 0; channel < 3; channel++) { std[channel] = sqrt(varSum[channel] / windowSize); if (std[channel] < 1e-4) std[channel] = 1e-4; } } __device__ float computeNCCCUDA(Problem *problem, int row1, int col1, int row2, int col2) { float ncc = 0.0; float mean1[3]; float std1[3]; float mean2[3]; float std2[3]; getWindowMeanSTDCUDA(problem, problem->img1, row1, col1, mean1, std1); getWindowMeanSTDCUDA(problem, problem->img2, row2, col2, mean2, std2); int halfWindow = problem->nccWindowSize / 2; int totalContribCount = 0; for (int rDel = -halfWindow; rDel <= halfWindow; rDel++) { for (int cDel = -halfWindow; cDel <= halfWindow; cDel++) { int r1 = row1 + rDel; int c1 = col1 + cDel; int r2 = row2 + rDel; int c2 = col2 + cDel; if (r1 >= 0 && r1 < problem->height && c1 >= 0 && c1 < problem->width && r2 >= 0 && r2 < problem->height && c2 >= 0 && c2 < problem->width) { for (int channel = 0; channel < 3; channel++) { int img1Idx = r1 * problem->width * 3 + c1 * 3 + channel; int img2Idx = r2 * problem->width * 3 + c2 * 3 + channel; float contrib = (problem->img1[img1Idx] - mean1[channel]) * (problem->img2[img2Idx] - mean2[channel]) / (std1[channel] * std2[channel]); ncc += contrib / 3.0; // To account for 3 channels. 
} totalContribCount++; } } } float avg = ncc / totalContribCount; return avg; } __global__ void computeDisparityKernelCUDA(Problem* problem) { int linearIdx = blockIdx.x * blockDim.x + threadIdx.x; if (linearIdx >= problem->height * problem->width) return; int row = linearIdx / problem->width; int col = linearIdx % problem->width; float bestNCC = -1e10; int bestColSec; for (int colSec = 0; colSec < problem->width; colSec++) { float ncc = computeNCCCUDA(problem, row, col, row, colSec); if (ncc > bestNCC) { bestNCC = ncc; bestColSec = colSec; } } int colDiff = bestColSec - col; if (colDiff < 0) colDiff = -colDiff; problem->res[problem->width * row + col] = colDiff; } float* computeDisparityMapCUDA(unsigned char* img1, unsigned char* img2, int height, int width, int nccWindowSize) { float* res; hipMalloc(&res, sizeof(float) * height * width); Problem* problemGPU; Problem problemCPU(img1, img2, height, width, res, nccWindowSize); hipMalloc(&problemGPU, sizeof(Problem)); hipMemcpy(problemGPU, &problemCPU, sizeof(Problem), hipMemcpyHostToDevice); int numOfCells = height * width; int numThreads = 512; int numBlocks = (numOfCells + numThreads - 1) / numThreads; hipLaunchKernelGGL(( computeDisparityKernelCUDA), dim3(numBlocks), dim3(numThreads), 0, 0, problemGPU); hipDeviceSynchronize(); float* resCPU = new float[height * width]; hipMemcpy(resCPU, res, sizeof(float) * height * width, hipMemcpyDeviceToHost); hipFree(res); hipFree(problemGPU); return resCPU; }
da59f1979d5093a8d279effbca881924be4b6d06.cu
#include "stereo_cuda.h" #include <stdio.h> struct Problem { public: unsigned char* img1; unsigned char* img2; int height, width, nccWindowSize; float* res; Problem(unsigned char* img1, unsigned char* img2, int height, int width, float* res, int nccWindowSize) { this->img1 = img1; this->img2 = img2; this->height = height; this->width = width; this->res = res; this->nccWindowSize = nccWindowSize; } }; __device__ void getWindowMeanSTDCUDA(Problem* problem, unsigned char* colorArr, int centerRow, int centerCol, float *mean, float *std) { float windowSum[3] = {0.0, 0.0, 0.0}; int windowSize = 0; int halfWindow = problem->nccWindowSize / 2; int colorArrIdx; for (int r = centerRow - halfWindow; r <= centerRow + halfWindow; r++) { for (int c = centerCol - halfWindow; c <= centerCol + halfWindow; c++) { if (r >= 0 && r < problem->height && c >= 0 && c < problem->width) { for (int channel = 0; channel < 3; channel++) { colorArrIdx = r * problem->width * 3 + c * 3 + channel; windowSum[channel] += colorArr[colorArrIdx]; } windowSize++; } } } // Compute average for (int channel = 0; channel < 3; channel++) { mean[channel] = windowSum[channel] / windowSize; } float varSum[3] = {0.0, 0.0, 0.0}; for (int r = centerRow - halfWindow; r <= centerRow + halfWindow; r++) { for (int c = centerCol - halfWindow; c <= centerCol + halfWindow; c++) { if (r >= 0 && r < problem->height && c >= 0 && c < problem->width) { for (int channel = 0; channel < 3; channel++) { colorArrIdx = r * problem->width * 3 + c * 3 + channel; float diff = colorArr[colorArrIdx] - mean[channel]; varSum[channel] += diff * diff; } } } } for (int channel = 0; channel < 3; channel++) { std[channel] = sqrt(varSum[channel] / windowSize); if (std[channel] < 1e-4) std[channel] = 1e-4; } } __device__ float computeNCCCUDA(Problem *problem, int row1, int col1, int row2, int col2) { float ncc = 0.0; float mean1[3]; float std1[3]; float mean2[3]; float std2[3]; getWindowMeanSTDCUDA(problem, problem->img1, row1, col1, mean1, std1); getWindowMeanSTDCUDA(problem, problem->img2, row2, col2, mean2, std2); int halfWindow = problem->nccWindowSize / 2; int totalContribCount = 0; for (int rDel = -halfWindow; rDel <= halfWindow; rDel++) { for (int cDel = -halfWindow; cDel <= halfWindow; cDel++) { int r1 = row1 + rDel; int c1 = col1 + cDel; int r2 = row2 + rDel; int c2 = col2 + cDel; if (r1 >= 0 && r1 < problem->height && c1 >= 0 && c1 < problem->width && r2 >= 0 && r2 < problem->height && c2 >= 0 && c2 < problem->width) { for (int channel = 0; channel < 3; channel++) { int img1Idx = r1 * problem->width * 3 + c1 * 3 + channel; int img2Idx = r2 * problem->width * 3 + c2 * 3 + channel; float contrib = (problem->img1[img1Idx] - mean1[channel]) * (problem->img2[img2Idx] - mean2[channel]) / (std1[channel] * std2[channel]); ncc += contrib / 3.0; // To account for 3 channels. 
} totalContribCount++; } } } float avg = ncc / totalContribCount; return avg; } __global__ void computeDisparityKernelCUDA(Problem* problem) { int linearIdx = blockIdx.x * blockDim.x + threadIdx.x; if (linearIdx >= problem->height * problem->width) return; int row = linearIdx / problem->width; int col = linearIdx % problem->width; float bestNCC = -1e10; int bestColSec; for (int colSec = 0; colSec < problem->width; colSec++) { float ncc = computeNCCCUDA(problem, row, col, row, colSec); if (ncc > bestNCC) { bestNCC = ncc; bestColSec = colSec; } } int colDiff = bestColSec - col; if (colDiff < 0) colDiff = -colDiff; problem->res[problem->width * row + col] = colDiff; } float* computeDisparityMapCUDA(unsigned char* img1, unsigned char* img2, int height, int width, int nccWindowSize) { float* res; cudaMalloc(&res, sizeof(float) * height * width); Problem* problemGPU; Problem problemCPU(img1, img2, height, width, res, nccWindowSize); cudaMalloc(&problemGPU, sizeof(Problem)); cudaMemcpy(problemGPU, &problemCPU, sizeof(Problem), cudaMemcpyHostToDevice); int numOfCells = height * width; int numThreads = 512; int numBlocks = (numOfCells + numThreads - 1) / numThreads; computeDisparityKernelCUDA<<<numBlocks, numThreads>>>(problemGPU); cudaDeviceSynchronize(); float* resCPU = new float[height * width]; cudaMemcpy(resCPU, res, sizeof(float) * height * width, cudaMemcpyDeviceToHost); cudaFree(res); cudaFree(problemGPU); return resCPU; }
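computeDisparityMapCUDA above takes interleaved 3-channel images laid out row-major as height x width x 3 and returns a newly allocated height x width disparity map; the kernel reads img1/img2 directly through the Problem struct, so both images must already reside in device memory. A hedged usage sketch (hostLeft/hostRight and the sizes are illustrative, not part of the original API):

// Hedged usage sketch of computeDisparityMapCUDA (illustrative buffers and sizes).
int height = 240, width = 320, nccWindowSize = 7;
size_t imgBytes = (size_t)height * width * 3;

unsigned char *d_left, *d_right;
cudaMalloc(&d_left,  imgBytes);
cudaMalloc(&d_right, imgBytes);
cudaMemcpy(d_left,  hostLeft,  imgBytes, cudaMemcpyHostToDevice);   // hostLeft: rectified left RGB image
cudaMemcpy(d_right, hostRight, imgBytes, cudaMemcpyHostToDevice);   // hostRight: rectified right RGB image

float* disparity = computeDisparityMapCUDA(d_left, d_right, height, width, nccWindowSize);
// disparity[r * width + c] = |best matching column - c| for pixel (r, c); allocated with new[].
delete[] disparity;
cudaFree(d_left);
cudaFree(d_right);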
023c62898aca864fd52e638a4ef14371511635fe.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const float* const vIn, const size_t numPoints, const size_t dilate, float* const imageOut){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; if((z > 0)){ //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; //pin point camera model y = y/z; x = x/z; y = round(y); x = round(x); //sanity check if(!((x > -100) && (y > -100) && (x < 100000) && (y < 100000))){ return; } for(int ix = x-dilate; ix <= x+dilate; ix++){ for(int iy = y-dilate; iy <= y+dilate; iy++){ if((ix >= 0) && (iy >= 0) && (ix < imWidth) && (iy < imHeight)){ imageOut[iy + ix*imHeight] = vIn[i]; } } } } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * tform = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * cam = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t dilate = ((uint32_T *) mxGetData(prhs[4]))[0]; size_t numPoints = mxGPUGetDimensions(points)[0]; size_t numChannels = mxGPUGetDimensions(points)[1] - 3; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tform)); float* camPtr = (float*)(mxGPUGetDataReadOnly(cam)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(points)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); float* vInPtr = &(zInPtr[numPoints]); //create output mwSize outSize[] = {imHeight,imWidth,numChannels}; mxGPUArray* out = mxGPUCreateGPUArray(3, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); plhs[0] = mxGPUCreateMxArrayOnGPU(out); float* outPtr = (float*)(mxGPUGetData(out)); //run and get ouputs for(size_t i = 0; i < numChannels; i++){ if(i != 0){ vInPtr = &(vInPtr[numPoints]); outPtr = &(outPtr[imWidth*imHeight]); } hipLaunchKernelGGL(( CameraTransformKernel), dim3(gridSize(numPoints)), dim3(BLOCK_SIZE), 0, 0, tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, vInPtr, numPoints, dilate, outPtr); CudaCheckError(); } //destroy reference structures mxGPUDestroyGPUArray(tform); mxGPUDestroyGPUArray(cam); mxGPUDestroyGPUArray(points); mxGPUDestroyGPUArray(out); }
023c62898aca864fd52e638a4ef14371511635fe.cu
/* function for projecting lidar points * */ #include "../common.h" __global__ void CameraTransformKernel(const float* const tform, const float* const cam, const size_t imWidth, const size_t imHeight, const float* const xIn, const float* const yIn, const float* const zIn, const float* const vIn, const size_t numPoints, const size_t dilate, float* const imageOut){ unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; if(i >= numPoints){ return; } //transform points float x = xIn[i]*tform[0] + yIn[i]*tform[4] + zIn[i]*tform[8] + tform[12]; float y = xIn[i]*tform[1] + yIn[i]*tform[5] + zIn[i]*tform[9] + tform[13]; float z = xIn[i]*tform[2] + yIn[i]*tform[6] + zIn[i]*tform[10] + tform[14]; if((z > 0)){ //apply projective camera matrix x = cam[0]*x + cam[3]*y + cam[6]*z + cam[9]; y = cam[1]*x + cam[4]*y + cam[7]*z + cam[10]; z = cam[2]*x + cam[5]*y + cam[8]*z + cam[11]; //pin point camera model y = y/z; x = x/z; y = round(y); x = round(x); //sanity check if(!((x > -100) && (y > -100) && (x < 100000) && (y < 100000))){ return; } for(int ix = x-dilate; ix <= x+dilate; ix++){ for(int iy = y-dilate; iy <= y+dilate; iy++){ if((ix >= 0) && (iy >= 0) && (ix < imWidth) && (iy < imHeight)){ imageOut[iy + ix*imHeight] = vIn[i]; } } } } } void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[]) { //initialize the MathWorks GPU API. mxInitGPU(); //read data mxGPUArray const * tform = mxGPUCreateFromMxArray(prhs[0]); mxGPUArray const * cam = mxGPUCreateFromMxArray(prhs[1]); mxGPUArray const * points = mxGPUCreateFromMxArray(prhs[2]); size_t imWidth = ((uint32_T *) mxGetData(prhs[3]))[1]; size_t imHeight = ((uint32_T *) mxGetData(prhs[3]))[0]; size_t dilate = ((uint32_T *) mxGetData(prhs[4]))[0]; size_t numPoints = mxGPUGetDimensions(points)[0]; size_t numChannels = mxGPUGetDimensions(points)[1] - 3; //get input pointers float* tformPtr = (float*)(mxGPUGetDataReadOnly(tform)); float* camPtr = (float*)(mxGPUGetDataReadOnly(cam)); float* xInPtr = (float*)(mxGPUGetDataReadOnly(points)); float* yInPtr = &(xInPtr[numPoints]); float* zInPtr = &(yInPtr[numPoints]); float* vInPtr = &(zInPtr[numPoints]); //create output mwSize outSize[] = {imHeight,imWidth,numChannels}; mxGPUArray* out = mxGPUCreateGPUArray(3, outSize, mxSINGLE_CLASS, mxREAL, MX_GPU_INITIALIZE_VALUES); plhs[0] = mxGPUCreateMxArrayOnGPU(out); float* outPtr = (float*)(mxGPUGetData(out)); //run and get ouputs for(size_t i = 0; i < numChannels; i++){ if(i != 0){ vInPtr = &(vInPtr[numPoints]); outPtr = &(outPtr[imWidth*imHeight]); } CameraTransformKernel<<<gridSize(numPoints), BLOCK_SIZE>>>(tformPtr, camPtr, imWidth, imHeight, xInPtr, yInPtr, zInPtr, vInPtr, numPoints, dilate, outPtr); CudaCheckError(); } //destroy reference structures mxGPUDestroyGPUArray(tform); mxGPUDestroyGPUArray(cam); mxGPUDestroyGPUArray(points); mxGPUDestroyGPUArray(out); }
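The launch above relies on BLOCK_SIZE and gridSize() from ../common.h, which is not shown here; note also that it writes imageOut[iy + ix*imHeight], i.e. column-major, matching MATLAB's array layout. A hedged sketch of what such launch helpers typically look like (the real header may differ):

// Hypothetical sketch of the launch helpers assumed from ../common.h (actual definitions may differ).
#define BLOCK_SIZE 512

inline size_t gridSize(size_t numElements) {
    return (numElements + BLOCK_SIZE - 1) / BLOCK_SIZE;   // enough blocks for one thread per element
}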
c2083eb1a073e5c202112bb2704c1f0ef481fedb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_uvector.hpp> #include <raft/handle.hpp> #include <utilities/error.hpp> #include <utilities/path_retrieval.hpp> namespace cugraph { namespace detail { template <typename vertex_t, typename weight_t> __global__ void get_traversed_cost_kernel(vertex_t const *vertices, vertex_t const *preds, vertex_t const *vtx_map, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices; i += gridDim.x * blockDim.x) { weight_t sum = info_weights[i]; vertex_t pred = preds[i]; while (pred != stop_vertex) { vertex_t pos = vtx_map[pred]; sum += info_weights[pos]; pred = preds[pos]; } out[i] = sum; } } template <typename vertex_t, typename weight_t> void get_traversed_cost_impl(raft::handle_t const &handle, vertex_t const *vertices, vertex_t const *preds, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { auto stream = handle.get_stream(); vertex_t max_blocks = handle.get_device_properties().maxGridSize[0]; vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock; dim3 nthreads, nblocks; nthreads.x = std::min<vertex_t>(num_vertices, max_threads); nthreads.y = 1; nthreads.z = 1; nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks); nblocks.y = 1; nblocks.z = 1; rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream); rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream); vertex_t *vtx_map = vtx_map_v.data(); vertex_t *vtx_keys = vtx_keys_v.data(); raft::copy(vtx_keys, vertices, num_vertices, stream); thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map); hipLaunchKernelGGL(( get_traversed_cost_kernel), dim3(nblocks), dim3(nthreads), 0, 0, vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices); } } // namespace detail template <typename vertex_t, typename weight_t> void get_traversed_cost(raft::handle_t const &handle, vertex_t const *vertices, vertex_t const *preds, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive"); CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices"); cugraph::detail::get_traversed_cost_impl( handle, vertices, preds, info_weights, out, stop_vertex, num_vertices); } template void get_traversed_cost<int32_t, float>(raft::handle_t const &handle, int32_t const *vertices, int32_t const *preds, float const *info_weights, float *out, int32_t stop_vertex, int32_t num_vertices); template void get_traversed_cost<int32_t, double>(raft::handle_t const &handle, int32_t const 
*vertices, int32_t const *preds, double const *info_weights, double *out, int32_t stop_vertex, int32_t num_vertices); template void get_traversed_cost<int64_t, float>(raft::handle_t const &handle, int64_t const *vertices, int64_t const *preds, float const *info_weights, float *out, int64_t stop_vertex, int64_t num_vertices); template void get_traversed_cost<int64_t, double>(raft::handle_t const &handle, int64_t const *vertices, int64_t const *preds, double const *info_weights, double *out, int64_t stop_vertex, int64_t num_vertices); } // namespace cugraph
c2083eb1a073e5c202112bb2704c1f0ef481fedb.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_uvector.hpp> #include <raft/handle.hpp> #include <utilities/error.hpp> #include <utilities/path_retrieval.hpp> namespace cugraph { namespace detail { template <typename vertex_t, typename weight_t> __global__ void get_traversed_cost_kernel(vertex_t const *vertices, vertex_t const *preds, vertex_t const *vtx_map, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { for (vertex_t i = threadIdx.x + blockIdx.x * blockDim.x; i < num_vertices; i += gridDim.x * blockDim.x) { weight_t sum = info_weights[i]; vertex_t pred = preds[i]; while (pred != stop_vertex) { vertex_t pos = vtx_map[pred]; sum += info_weights[pos]; pred = preds[pos]; } out[i] = sum; } } template <typename vertex_t, typename weight_t> void get_traversed_cost_impl(raft::handle_t const &handle, vertex_t const *vertices, vertex_t const *preds, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { auto stream = handle.get_stream(); vertex_t max_blocks = handle.get_device_properties().maxGridSize[0]; vertex_t max_threads = handle.get_device_properties().maxThreadsPerBlock; dim3 nthreads, nblocks; nthreads.x = std::min<vertex_t>(num_vertices, max_threads); nthreads.y = 1; nthreads.z = 1; nblocks.x = std::min<vertex_t>((num_vertices + nthreads.x - 1) / nthreads.x, max_blocks); nblocks.y = 1; nblocks.z = 1; rmm::device_uvector<vertex_t> vtx_map_v(num_vertices, stream); rmm::device_uvector<vertex_t> vtx_keys_v(num_vertices, stream); vertex_t *vtx_map = vtx_map_v.data(); vertex_t *vtx_keys = vtx_keys_v.data(); raft::copy(vtx_keys, vertices, num_vertices, stream); thrust::sequence(rmm::exec_policy(stream)->on(stream), vtx_map, vtx_map + num_vertices); thrust::stable_sort_by_key( rmm::exec_policy(stream)->on(stream), vtx_keys, vtx_keys + num_vertices, vtx_map); get_traversed_cost_kernel<<<nblocks, nthreads>>>( vertices, preds, vtx_map, info_weights, out, stop_vertex, num_vertices); } } // namespace detail template <typename vertex_t, typename weight_t> void get_traversed_cost(raft::handle_t const &handle, vertex_t const *vertices, vertex_t const *preds, weight_t const *info_weights, weight_t *out, vertex_t stop_vertex, vertex_t num_vertices) { CUGRAPH_EXPECTS(num_vertices > 0, "num_vertices should be strictly positive"); CUGRAPH_EXPECTS(out != nullptr, "out should be of size num_vertices"); cugraph::detail::get_traversed_cost_impl( handle, vertices, preds, info_weights, out, stop_vertex, num_vertices); } template void get_traversed_cost<int32_t, float>(raft::handle_t const &handle, int32_t const *vertices, int32_t const *preds, float const *info_weights, float *out, int32_t stop_vertex, int32_t num_vertices); template void get_traversed_cost<int32_t, double>(raft::handle_t const &handle, int32_t const *vertices, int32_t const *preds, double const *info_weights, double *out, int32_t stop_vertex, int32_t num_vertices); template 
void get_traversed_cost<int64_t, float>(raft::handle_t const &handle, int64_t const *vertices, int64_t const *preds, float const *info_weights, float *out, int64_t stop_vertex, int64_t num_vertices); template void get_traversed_cost<int64_t, double>(raft::handle_t const &handle, int64_t const *vertices, int64_t const *preds, double const *info_weights, double *out, int64_t stop_vertex, int64_t num_vertices); } // namespace cugraph
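The get_traversed_cost_kernel in both files above accumulates info_weights along each vertex's predecessor chain until it reaches stop_vertex, translating predecessor ids into array positions through vtx_map (the permutation produced by the stable sort on vertex ids). The following is a small host-side sketch of the same accumulation; it is a hypothetical standalone example, not cuGraph code, and it assumes vertex ids are already renumbered to 0..n-1, which is what indexing vtx_map directly by a vertex id requires.

// Host-side reference of get_traversed_cost_kernel's accumulation logic (toy data).
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  const int stop_vertex = -1;                         // hypothetical sentinel for "no predecessor"
  std::vector<int> vertices = {3, 0, 2, 1};           // vertex ids in arbitrary order
  std::vector<int> preds    = {0, -1, 0, 2};          // predecessor (as a vertex id) of vertices[i]
  std::vector<float> info_weights = {4.f, 1.f, 2.f, 3.f};

  // vtx_map[k] = position in the original arrays of vertex id k (ids are 0..n-1),
  // mirroring the thrust::sequence + stable_sort_by_key step in the kernel's caller.
  std::vector<int> vtx_map(vertices.size());
  std::iota(vtx_map.begin(), vtx_map.end(), 0);
  std::stable_sort(vtx_map.begin(), vtx_map.end(),
                   [&](int a, int b) { return vertices[a] < vertices[b]; });

  for (size_t i = 0; i < vertices.size(); ++i) {
    float sum = info_weights[i];
    int pred = preds[i];
    while (pred != stop_vertex) {
      int pos = vtx_map[pred];                        // translate vertex id -> array position
      sum += info_weights[pos];
      pred = preds[pos];
    }
    std::printf("vertex %d: traversed cost %.1f\n", vertices[i], sum);
  }
  return 0;
}

With these toy inputs the chain for vertex 3 is 3 -> 0, so its traversed cost is 4 + 1 = 5, which is exactly the per-thread accumulation the kernel performs.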
bb79952aa0fcb1afc7d611a6585395a10a4bd40c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
    File name: bfs_cpu_status_array.cu
    Author: Yuede Ji
    Last update: 13:30 10-10-2015
    Description: Using a status array to implement the GPU version of BFS.
                 Calculate the shortest distance from vertex 0 to all others.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

//Using arrays to implement queue
/**char filein[] = "/home/yuede/dataset/kron_16_16.dat";// no need
char fileout[] = "/home/yuede/dataset/kron_16_16.gpu.as.result";
char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e";
char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_16_16.csr";
**/
char filein[] = "/home/yuede/dataset/kron_10_4.dat";// no need
char fileout[] = "/home/yuede/dataset/kron_10_4.gpu.as.result";
char file_v_e[] = "/home/yuede/dataset/kron_10_4.v_e";
char file_beg_pos[] = "/home/yuede/dataset/kron_10_4.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_10_4.csr";

const int v_num = 65535;
const int e_num = 2097152;
const int INF = 0x7FFFFFFF;
const int threads_num = 256;

int beg_pos[v_num+1];
int csr[e_num];
int sa[v_num];

//load from .dat files, and store in array csr[N*N], beg_pos[N]
int csr_begin(int v, int e);
void bfs_sa(int root, int v, int e);

// One level-synchronous BFS pass: each thread owns one vertex; if that vertex sits
// at the current level, relax all of its neighbors to level + 1 and raise the
// device-side flag so the host knows another pass is needed.
__global__ void traverse_one(int level, int * dev_sa, int * dev_beg_pos, int * dev_csr, bool * dev_flag)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(id >= v_num) //the grid launches more threads than there are vertices
        return;
    if(dev_sa[id] == level)///node i belongs to current level
    {
        //int j = dev_beg_pos[id];
        for(int j=dev_beg_pos[id]; j<dev_beg_pos[id+1]; ++j)
        {
            if(dev_sa[dev_csr[j]] > level + 1)
            {
                dev_sa[dev_csr[j]] = level + 1;
                printf("%d\n", dev_csr[j]);
                if(!(*dev_flag))
                    *dev_flag = true;
            }
        }
    }
}

int main()
{
    csr_begin(v_num, e_num);
    bfs_sa(0, v_num, e_num);

    FILE * fp_out = fopen(fileout, "w");
    for(int i=0; i<v_num; ++i)
        fprintf(fp_out, "%d\n", sa[i]);
    fclose(fp_out);
    return 0;
}

void bfs_sa(int root, int v, int e)
{
    for(int i=0; i<v; ++i)
        sa[i] = INF;
    int level = 0;
    sa[0] = 0;
    bool flag = true; //flag whether current level has nodes

    int *dev_sa;
    int *dev_beg_pos;
    int *dev_csr;
    hipMalloc( (void **) &dev_sa, v*sizeof(int));
    hipMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(int));
    hipMalloc( (void **) &dev_csr, e*sizeof(int));

    hipMemcpy(dev_sa, sa, v*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_beg_pos, beg_pos, (v+1)*sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(dev_csr, csr, e*sizeof(int), hipMemcpyHostToDevice);

    bool *dev_flag; //device-side copy of the "any vertex updated this level" flag
    hipMalloc( (void **) &dev_flag, sizeof(bool));

    while(flag)
    {
        printf("level = %d\n", level);
        flag = false;
        hipMemcpy(dev_flag, &flag, sizeof(bool), hipMemcpyHostToDevice);
        hipLaunchKernelGGL(( traverse_one), dim3(threads_num), dim3(threads_num), 0, 0, level, dev_sa, dev_beg_pos, dev_csr, dev_flag);
        ++level;
        hipMemcpy(&flag, dev_flag, sizeof(bool), hipMemcpyDeviceToHost);
    }

    hipMemcpy(sa, dev_sa, v*sizeof(int), hipMemcpyDeviceToHost);

    hipFree(dev_sa);
    hipFree(dev_beg_pos);
    hipFree(dev_csr);
    hipFree(dev_flag);
}

int csr_begin(int v, int e)
{
    FILE * fp_beg = fopen(file_beg_pos, "r");
    int i = 0;
    int p;
    while(fscanf(fp_beg, "%d", &p) != EOF)
    {
        beg_pos[i] = p;
        ++i;
    }
    fclose(fp_beg);

    i = 0;
    FILE * fp_csr = fopen(file_csr, "r");
    while(fscanf(fp_csr, "%d", &p) != EOF)
    {
        csr[i] = p;
        ++i;
    }
    fclose(fp_csr);
    return v;
}
bb79952aa0fcb1afc7d611a6585395a10a4bd40c.cu
/**
    File name: bfs_cpu_status_array.cu
    Author: Yuede Ji
    Last update: 13:30 10-10-2015
    Description: Using a status array to implement the GPU version of BFS.
                 Calculate the shortest distance from vertex 0 to all others.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

//Using arrays to implement queue
/**char filein[] = "/home/yuede/dataset/kron_16_16.dat";// no need
char fileout[] = "/home/yuede/dataset/kron_16_16.gpu.as.result";
char file_v_e[] = "/home/yuede/dataset/kron_16_16.v_e";
char file_beg_pos[] = "/home/yuede/dataset/kron_16_16.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_16_16.csr";
**/
char filein[] = "/home/yuede/dataset/kron_10_4.dat";// no need
char fileout[] = "/home/yuede/dataset/kron_10_4.gpu.as.result";
char file_v_e[] = "/home/yuede/dataset/kron_10_4.v_e";
char file_beg_pos[] = "/home/yuede/dataset/kron_10_4.beg.pos";
char file_csr[] = "/home/yuede/dataset/kron_10_4.csr";

const int v_num = 65535;
const int e_num = 2097152;
const int INF = 0x7FFFFFFF;
const int threads_num = 256;

int beg_pos[v_num+1];
int csr[e_num];
int sa[v_num];

//load from .dat files, and store in array csr[N*N], beg_pos[N]
int csr_begin(int v, int e);
void bfs_sa(int root, int v, int e);

// One level-synchronous BFS pass: each thread owns one vertex; if that vertex sits
// at the current level, relax all of its neighbors to level + 1 and raise the
// device-side flag so the host knows another pass is needed.
__global__ void traverse_one(int level, int * dev_sa, int * dev_beg_pos, int * dev_csr, bool * dev_flag)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(id >= v_num) //the grid launches more threads than there are vertices
        return;
    if(dev_sa[id] == level)///node i belongs to current level
    {
        //int j = dev_beg_pos[id];
        for(int j=dev_beg_pos[id]; j<dev_beg_pos[id+1]; ++j)
        {
            if(dev_sa[dev_csr[j]] > level + 1)
            {
                dev_sa[dev_csr[j]] = level + 1;
                printf("%d\n", dev_csr[j]);
                if(!(*dev_flag))
                    *dev_flag = true;
            }
        }
    }
}

int main()
{
    csr_begin(v_num, e_num);
    bfs_sa(0, v_num, e_num);

    FILE * fp_out = fopen(fileout, "w");
    for(int i=0; i<v_num; ++i)
        fprintf(fp_out, "%d\n", sa[i]);
    fclose(fp_out);
    return 0;
}

void bfs_sa(int root, int v, int e)
{
    for(int i=0; i<v; ++i)
        sa[i] = INF;
    int level = 0;
    sa[0] = 0;
    bool flag = true; //flag whether current level has nodes

    int *dev_sa;
    int *dev_beg_pos;
    int *dev_csr;
    cudaMalloc( (void **) &dev_sa, v*sizeof(int));
    cudaMalloc( (void **) &dev_beg_pos, (v+1)*sizeof(int));
    cudaMalloc( (void **) &dev_csr, e*sizeof(int));

    cudaMemcpy(dev_sa, sa, v*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_beg_pos, beg_pos, (v+1)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_csr, csr, e*sizeof(int), cudaMemcpyHostToDevice);

    bool *dev_flag; //device-side copy of the "any vertex updated this level" flag
    cudaMalloc( (void **) &dev_flag, sizeof(bool));

    while(flag)
    {
        printf("level = %d\n", level);
        flag = false;
        cudaMemcpy(dev_flag, &flag, sizeof(bool), cudaMemcpyHostToDevice);
        traverse_one<<<threads_num, threads_num>>>(level, dev_sa, dev_beg_pos, dev_csr, dev_flag);
        ++level;
        cudaMemcpy(&flag, dev_flag, sizeof(bool), cudaMemcpyDeviceToHost);
    }

    cudaMemcpy(sa, dev_sa, v*sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_sa);
    cudaFree(dev_beg_pos);
    cudaFree(dev_csr);
    cudaFree(dev_flag);
}

int csr_begin(int v, int e)
{
    FILE * fp_beg = fopen(file_beg_pos, "r");
    int i = 0;
    int p;
    while(fscanf(fp_beg, "%d", &p) != EOF)
    {
        beg_pos[i] = p;
        ++i;
    }
    fclose(fp_beg);

    i = 0;
    FILE * fp_csr = fopen(file_csr, "r");
    while(fscanf(fp_csr, "%d", &p) != EOF)
    {
        csr[i] = p;
        ++i;
    }
    fclose(fp_csr);
    return v;
}
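Both BFS files implement a level-synchronous traversal over a CSR graph: the status array sa stores each vertex's level, and one kernel launch per level relaxes the neighbors of every vertex at the current level. A minimal CPU sketch of a single pass (a hypothetical toy example, not part of the original files) shows the loop the kernel parallelizes over vertices and the role of the update flag.

#include <cstdio>
#include <vector>

// One level-synchronous BFS pass over a CSR graph: every vertex currently at
// `level` relaxes its neighbors to `level + 1`. Returns true if any vertex was
// updated, which is the role of the device-side flag in the kernels above.
bool bfs_level_pass(const std::vector<int>& beg_pos,   // size v+1, CSR row offsets
                    const std::vector<int>& csr,       // concatenated adjacency lists
                    std::vector<int>& sa,              // status array: level per vertex
                    int level)
{
    bool updated = false;
    const int v = static_cast<int>(sa.size());
    for (int u = 0; u < v; ++u) {                      // the GPU maps this loop to threads
        if (sa[u] != level) continue;
        for (int j = beg_pos[u]; j < beg_pos[u + 1]; ++j) {
            int w = csr[j];
            if (sa[w] > level + 1) {
                sa[w] = level + 1;
                updated = true;
            }
        }
    }
    return updated;
}

int main() {
    // Toy graph: 0 -> {1, 2}, 1 -> {3}, 2 -> {3}, 3 -> {}
    std::vector<int> beg_pos = {0, 2, 3, 4, 4};
    std::vector<int> csr     = {1, 2, 3, 3};
    const int INF = 0x7FFFFFFF;
    std::vector<int> sa(4, INF);
    sa[0] = 0;                                         // source vertex

    int level = 0;
    while (bfs_level_pass(beg_pos, csr, sa, level))    // same role as the while(flag) loop
        ++level;

    for (size_t i = 0; i < sa.size(); ++i)
        std::printf("vertex %zu: level %d\n", i, sa[i]);
    return 0;
}

The host repeats the pass with an increasing level until no vertex is updated, which mirrors the while(flag) loop in bfs_sa.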
lstm_gpu_benchmark.hip
// !!! This is a file automatically generated by hipify!!! #include "../include/rnn2d/lstm_gpu.h" #include "../include/rnn2d/cuda_utils.h" #include <benchmark/benchmark.h> #include <hiprand/hiprand.h> #include <glog/logging.h> #include <thrust/device_vector.h> using thrust::device_vector; #define DEFAULT_H 32 #define DEFAULT_W 256 #define DEFAULT_N 16 #define DEFAULT_K 16 #define DEFAULT_D 16 #define CHECK_CURAND(x) CHECK_EQ((x), HIPRAND_STATUS_SUCCESS) template <typename T> class LstmWrapper { public: LstmWrapper(const int H, const int W, const int N, const int K, const int D) : H_(H), W_(W), N_(N), K_(K), D_(D) { input_ = data_.data().get(); output_ = input_ + GetInputSize(H_, W_, N_, K_); param_ = output_ + GetOutputSize(H_, W_, N_, D_); gradInput_ = param_ + GetParamSize(K_, D_); gradOutput_ = gradInput_ + GetInputSize(H_, W_, N_, K_); gradParam_ = gradOutput_ + GetOutputSize(H_, W_, N_, K_); wspace_ = static_cast<void *>(wrspace_.data().get()); rspace_ = static_cast<void *>( wrspace_.data().get() + ::max( GetTrainingWorkspaceSize(H_, W_, N_, D_), GetInferenceWorkspaceSize(H_, W_, N_, D_))); } static void Initialize(const int H, const int W, const int N, const int K, const int D) { const size_t data_size = 2 * (GetInputSize(H, W, N, K) + GetOutputSize(H, W, N, D) + GetParamSize(K, D)); const size_t wrspace_size = ::max(GetTrainingWorkspaceSize(H, W, N, D), GetInferenceWorkspaceSize(H, W, N, D)) + GetReserveSize(H, W, N, D); const size_t data_size_mb = data_size * sizeof(T) / static_cast<float>(1 << 20); const size_t wrspace_size_mb = wrspace_size / static_cast<float>(1 << 20); LOG(INFO) << "Allocating " << data_size_mb + wrspace_size_mb << "MB in the GPU..."; data_.resize(data_size); wrspace_.resize(wrspace_size); LOG(INFO) << "Filling " << data_size_mb << "MB with random numbers..."; GenerateUniform(data_.data().get(), data_size); LOG(INFO) << "Done!"; } static void Destroy() { data_.clear(); wrspace_.clear(); data_.shrink_to_fit(); wrspace_.shrink_to_fit(); } inline void ForwardInference(); inline void ForwardTraining(); inline void BackwardData(); inline void BackwardParam(); private: static size_t GetInputSize(const int H, const int W, const int N, const int K) { return rnn2d_lstm_input_nelem(H, W, N, K); } static size_t GetOutputSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_output_nelem(H, W, N, D); } static size_t GetParamSize(const int K, const int D) { return rnn2d_lstm_parameters_nelem(K, D); } static size_t GetInferenceWorkspaceSize(const int H, const int W, const int N, const int D); static size_t GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D); static size_t GetReserveSize(const int H, const int W, const int N, const int D); static void GenerateUniform(T* data, size_t n); static device_vector<T> data_; static device_vector<char> wrspace_; const int H_, W_, N_, K_, D_; T *input_, *output_, *param_, *gradInput_, *gradOutput_, *gradParam_; void *wspace_, *rspace_; }; template <> device_vector<float> LstmWrapper<float>::data_ = device_vector<float>(); template <> device_vector<char> LstmWrapper<float>::wrspace_ = device_vector<char>(); template <> device_vector<double> LstmWrapper<double>::data_ = device_vector<double>(); template <> device_vector<char> LstmWrapper<double>::wrspace_ = device_vector<char>(); template <> size_t LstmWrapper<float>::GetInferenceWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_inference_workspace_size(H, W, N, D); } template <> size_t 
LstmWrapper<double>::GetInferenceWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_double_inference_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<float>::GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_training_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<double>::GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_double_training_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<float>::GetReserveSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_training_reserve_size(H, W, N, D); } template <> size_t LstmWrapper<double>::GetReserveSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_double_training_reserve_size(H, W, N, D); } template <> void LstmWrapper<float>::GenerateUniform(float* data, size_t n) { hiprandGenerator_t gen; CHECK_CURAND(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); CHECK_CURAND(hiprandGenerateUniform(gen, data, n)); CHECK_CUDA_CALL(hipDeviceSynchronize()); CHECK_CURAND(hiprandDestroyGenerator(gen)); } template <> void LstmWrapper<double>::GenerateUniform(double* data, size_t n) { hiprandGenerator_t gen; CHECK_CURAND(hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(hiprandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); CHECK_CURAND(hiprandGenerateUniformDouble(gen, data, n)); CHECK_CUDA_CALL(hipDeviceSynchronize()); CHECK_CURAND(hiprandDestroyGenerator(gen)); } template <> void LstmWrapper<float>::ForwardInference() { rnn2d_lstm_gpu_float_fw_inference(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_); } template <> void LstmWrapper<double>::ForwardInference() { rnn2d_lstm_gpu_double_fw_inference(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_); } template <> void LstmWrapper<float>::ForwardTraining() { rnn2d_lstm_gpu_float_fw_training(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_, rspace_); } template <> void LstmWrapper<double>::ForwardTraining() { rnn2d_lstm_gpu_double_fw_training(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_, rspace_); } template <> void LstmWrapper<float>::BackwardData() { rnn2d_lstm_gpu_float_bw_data(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, gradOutput_, gradInput_, wspace_, rspace_); } template <> void LstmWrapper<double>::BackwardData() { rnn2d_lstm_gpu_double_bw_data(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, gradOutput_, gradInput_, wspace_, rspace_); } template <> void LstmWrapper<float>::BackwardParam() { rnn2d_lstm_gpu_float_bw_param(H_, W_, N_, K_, D_, input_, output_, 1.0, gradParam_, wspace_, rspace_); } template <> void LstmWrapper<double>::BackwardParam() { rnn2d_lstm_gpu_double_bw_param(H_, W_, N_, K_, D_, input_, output_, 1.0, gradParam_, wspace_, rspace_); } template <typename T> static void BM_fw_inference(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.ForwardInference(); while (state.KeepRunning()) { lstm.ForwardInference(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_fw_training(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N 
= state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.ForwardTraining(); while (state.KeepRunning()) { lstm.ForwardTraining(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_data(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardData(); while (state.KeepRunning()) { lstm.BackwardData(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_param(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardParam(); while (state.KeepRunning()) { lstm.BackwardParam(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_ALL(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardData(); while (state.KeepRunning()) { lstm.BackwardData(); lstm.BackwardParam(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } #define INSTANTIATE_BENCHMARKS(TYPE) \ BENCHMARK_TEMPLATE(BM_fw_inference, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_fw_training, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_data, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_param, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_ALL, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime() INSTANTIATE_BENCHMARKS(float); INSTANTIATE_BENCHMARKS(double); int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); benchmark::Initialize(&argc, argv); // Get number of CUDA devices and their names. int numDevices = 0; CHECK_CUDA_CALL(hipGetDeviceCount(&numDevices)); hipDeviceProp_t* dp = new hipDeviceProp_t[numDevices]; for (int d = 0; d < numDevices; ++d) { CHECK_CUDA_CALL(hipGetDeviceProperties(&dp[d], d)); } // Set the CUDA device to use if (argc > 1) { CHECK_CUDA_CALL(hipSetDevice(atoi(argv[1]))); } // Display the name of the CUDA device being used int curDevice = 0; CHECK_CUDA_CALL(hipGetDevice(&curDevice)); LOG(INFO) << "Found " << numDevices << " CUDA devices, using device " << curDevice << ": " << dp[curDevice].name; LstmWrapper<float>::Initialize(DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D); LstmWrapper<double>::Initialize(DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D); benchmark::RunSpecifiedBenchmarks(); LstmWrapper<float>::Destroy(); LstmWrapper<double>::Destroy(); return 0; }
lstm_gpu_benchmark.cu
#include "../include/rnn2d/lstm_gpu.h" #include "../include/rnn2d/cuda_utils.h" #include <benchmark/benchmark.h> #include <curand.h> #include <glog/logging.h> #include <thrust/device_vector.h> using thrust::device_vector; #define DEFAULT_H 32 #define DEFAULT_W 256 #define DEFAULT_N 16 #define DEFAULT_K 16 #define DEFAULT_D 16 #define CHECK_CURAND(x) CHECK_EQ((x), CURAND_STATUS_SUCCESS) template <typename T> class LstmWrapper { public: LstmWrapper(const int H, const int W, const int N, const int K, const int D) : H_(H), W_(W), N_(N), K_(K), D_(D) { input_ = data_.data().get(); output_ = input_ + GetInputSize(H_, W_, N_, K_); param_ = output_ + GetOutputSize(H_, W_, N_, D_); gradInput_ = param_ + GetParamSize(K_, D_); gradOutput_ = gradInput_ + GetInputSize(H_, W_, N_, K_); gradParam_ = gradOutput_ + GetOutputSize(H_, W_, N_, K_); wspace_ = static_cast<void *>(wrspace_.data().get()); rspace_ = static_cast<void *>( wrspace_.data().get() + std::max( GetTrainingWorkspaceSize(H_, W_, N_, D_), GetInferenceWorkspaceSize(H_, W_, N_, D_))); } static void Initialize(const int H, const int W, const int N, const int K, const int D) { const size_t data_size = 2 * (GetInputSize(H, W, N, K) + GetOutputSize(H, W, N, D) + GetParamSize(K, D)); const size_t wrspace_size = std::max(GetTrainingWorkspaceSize(H, W, N, D), GetInferenceWorkspaceSize(H, W, N, D)) + GetReserveSize(H, W, N, D); const size_t data_size_mb = data_size * sizeof(T) / static_cast<float>(1 << 20); const size_t wrspace_size_mb = wrspace_size / static_cast<float>(1 << 20); LOG(INFO) << "Allocating " << data_size_mb + wrspace_size_mb << "MB in the GPU..."; data_.resize(data_size); wrspace_.resize(wrspace_size); LOG(INFO) << "Filling " << data_size_mb << "MB with random numbers..."; GenerateUniform(data_.data().get(), data_size); LOG(INFO) << "Done!"; } static void Destroy() { data_.clear(); wrspace_.clear(); data_.shrink_to_fit(); wrspace_.shrink_to_fit(); } inline void ForwardInference(); inline void ForwardTraining(); inline void BackwardData(); inline void BackwardParam(); private: static size_t GetInputSize(const int H, const int W, const int N, const int K) { return rnn2d_lstm_input_nelem(H, W, N, K); } static size_t GetOutputSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_output_nelem(H, W, N, D); } static size_t GetParamSize(const int K, const int D) { return rnn2d_lstm_parameters_nelem(K, D); } static size_t GetInferenceWorkspaceSize(const int H, const int W, const int N, const int D); static size_t GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D); static size_t GetReserveSize(const int H, const int W, const int N, const int D); static void GenerateUniform(T* data, size_t n); static device_vector<T> data_; static device_vector<char> wrspace_; const int H_, W_, N_, K_, D_; T *input_, *output_, *param_, *gradInput_, *gradOutput_, *gradParam_; void *wspace_, *rspace_; }; template <> device_vector<float> LstmWrapper<float>::data_ = device_vector<float>(); template <> device_vector<char> LstmWrapper<float>::wrspace_ = device_vector<char>(); template <> device_vector<double> LstmWrapper<double>::data_ = device_vector<double>(); template <> device_vector<char> LstmWrapper<double>::wrspace_ = device_vector<char>(); template <> size_t LstmWrapper<float>::GetInferenceWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_inference_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<double>::GetInferenceWorkspaceSize(const int H, const int W, 
const int N, const int D) { return rnn2d_lstm_gpu_double_inference_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<float>::GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_training_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<double>::GetTrainingWorkspaceSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_double_training_workspace_size(H, W, N, D); } template <> size_t LstmWrapper<float>::GetReserveSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_float_training_reserve_size(H, W, N, D); } template <> size_t LstmWrapper<double>::GetReserveSize(const int H, const int W, const int N, const int D) { return rnn2d_lstm_gpu_double_training_reserve_size(H, W, N, D); } template <> void LstmWrapper<float>::GenerateUniform(float* data, size_t n) { curandGenerator_t gen; CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); CHECK_CURAND(curandGenerateUniform(gen, data, n)); CHECK_CUDA_CALL(cudaDeviceSynchronize()); CHECK_CURAND(curandDestroyGenerator(gen)); } template <> void LstmWrapper<double>::GenerateUniform(double* data, size_t n) { curandGenerator_t gen; CHECK_CURAND(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT)); CHECK_CURAND(curandSetPseudoRandomGeneratorSeed(gen, 1234ULL)); CHECK_CURAND(curandGenerateUniformDouble(gen, data, n)); CHECK_CUDA_CALL(cudaDeviceSynchronize()); CHECK_CURAND(curandDestroyGenerator(gen)); } template <> void LstmWrapper<float>::ForwardInference() { rnn2d_lstm_gpu_float_fw_inference(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_); } template <> void LstmWrapper<double>::ForwardInference() { rnn2d_lstm_gpu_double_fw_inference(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_); } template <> void LstmWrapper<float>::ForwardTraining() { rnn2d_lstm_gpu_float_fw_training(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_, rspace_); } template <> void LstmWrapper<double>::ForwardTraining() { rnn2d_lstm_gpu_double_fw_training(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, wspace_, rspace_); } template <> void LstmWrapper<float>::BackwardData() { rnn2d_lstm_gpu_float_bw_data(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, gradOutput_, gradInput_, wspace_, rspace_); } template <> void LstmWrapper<double>::BackwardData() { rnn2d_lstm_gpu_double_bw_data(H_, W_, N_, K_, D_, input_, nullptr, param_, output_, gradOutput_, gradInput_, wspace_, rspace_); } template <> void LstmWrapper<float>::BackwardParam() { rnn2d_lstm_gpu_float_bw_param(H_, W_, N_, K_, D_, input_, output_, 1.0, gradParam_, wspace_, rspace_); } template <> void LstmWrapper<double>::BackwardParam() { rnn2d_lstm_gpu_double_bw_param(H_, W_, N_, K_, D_, input_, output_, 1.0, gradParam_, wspace_, rspace_); } template <typename T> static void BM_fw_inference(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.ForwardInference(); while (state.KeepRunning()) { lstm.ForwardInference(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_fw_training(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); 
LstmWrapper<T> lstm(H, W, N, K, D); lstm.ForwardTraining(); while (state.KeepRunning()) { lstm.ForwardTraining(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_data(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardData(); while (state.KeepRunning()) { lstm.BackwardData(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_param(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardParam(); while (state.KeepRunning()) { lstm.BackwardParam(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } template <typename T> static void BM_bw_ALL(benchmark::State& state) { const int H = state.range(0); const int W = state.range(1); const int N = state.range(2); const int K = state.range(3); const int D = state.range(4); LstmWrapper<T> lstm(H, W, N, K, D); lstm.BackwardData(); while (state.KeepRunning()) { lstm.BackwardData(); lstm.BackwardParam(); } state.SetItemsProcessed(state.iterations() * H * W * N * K * D); } #define INSTANTIATE_BENCHMARKS(TYPE) \ BENCHMARK_TEMPLATE(BM_fw_inference, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_fw_training, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_data, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_param, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime(); \ \ BENCHMARK_TEMPLATE(BM_bw_ALL, TYPE) \ ->Args({DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D}) \ ->Unit(benchmark::kMicrosecond) \ ->UseRealTime() INSTANTIATE_BENCHMARKS(float); INSTANTIATE_BENCHMARKS(double); int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); benchmark::Initialize(&argc, argv); // Get number of CUDA devices and their names. int numDevices = 0; CHECK_CUDA_CALL(cudaGetDeviceCount(&numDevices)); cudaDeviceProp* dp = new cudaDeviceProp[numDevices]; for (int d = 0; d < numDevices; ++d) { CHECK_CUDA_CALL(cudaGetDeviceProperties(&dp[d], d)); } // Set the CUDA device to use if (argc > 1) { CHECK_CUDA_CALL(cudaSetDevice(atoi(argv[1]))); } // Display the name of the CUDA device being used int curDevice = 0; CHECK_CUDA_CALL(cudaGetDevice(&curDevice)); LOG(INFO) << "Found " << numDevices << " CUDA devices, using device " << curDevice << ": " << dp[curDevice].name; LstmWrapper<float>::Initialize(DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D); LstmWrapper<double>::Initialize(DEFAULT_H, DEFAULT_W, DEFAULT_N, DEFAULT_K, DEFAULT_D); benchmark::RunSpecifiedBenchmarks(); LstmWrapper<float>::Destroy(); LstmWrapper<double>::Destroy(); return 0; }
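The benchmark pair above follows the usual Google Benchmark pattern: each BM_* function pulls its problem size from state.range(i), performs one warm-up call outside the timed loop, and reports throughput through SetItemsProcessed. A stripped-down sketch of that pattern with a hypothetical CPU workload (not the rnn2d API) isolates the structure:

#include <benchmark/benchmark.h>
#include <vector>

// Toy CPU workload standing in for the GPU call; only the benchmarking pattern matters here.
static void saxpy(std::vector<float>& y, const std::vector<float>& x, float a) {
  for (size_t i = 0; i < y.size(); ++i) y[i] += a * x[i];
}

static void BM_saxpy(benchmark::State& state) {
  const int n = static_cast<int>(state.range(0));     // problem size from ->Args({...})
  std::vector<float> x(n, 1.0f), y(n, 2.0f);
  saxpy(y, x, 0.5f);                                  // warm-up call outside the timed loop
  while (state.KeepRunning()) {
    saxpy(y, x, 0.5f);
    benchmark::DoNotOptimize(y.data());
  }
  state.SetItemsProcessed(state.iterations() * n);    // reported as items per second
}
BENCHMARK(BM_saxpy)->Args({1 << 20})->Unit(benchmark::kMicrosecond)->UseRealTime();

BENCHMARK_MAIN();

Registering the same function several times with different Args lines, as the INSTANTIATE_BENCHMARKS macro does above, is how one benchmark body covers many problem sizes and data types.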
cededec9c8507840a929ef4893dd30160148908d.hip
// !!! This is a file automatically generated by hipify!!! /** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * scalar, vector and matrix multiplication * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <hip/hip_runtime.h> #include <polybenchUtilFuncts.h> #include <util.h> #include <ca.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* ****************************************************************** * * CA Specific Definitions * Need to develop a method to extract this information from source * * ******************************************************************/ #define ITEMS N * N #define FIELDS 2 typedef struct data_item_type { DATA_TYPE a; DATA_TYPE b; } data_item; #define SPARSITY N void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; #ifdef DEBUG if (i < 4) printf("%d\t", i * N + j); #endif } #ifdef DEBUG if (i < 4) printf("\n"); #endif y[i] = ALPHA * tmp[i] + BETA * y[i]; } } __global__ void gesummv_ca_kernel(DATA_TYPE *ca, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; int index = (i * FIELDS) - (i % TILE) * (FIELDS - 1); for(j = 0; j < SPARSITY; j++) { tmp[i] += ca[index + (j * SPARSITY * FIELDS)] * x[j]; y[i] += ca[index + (j * SPARSITY * FIELDS) + TILE] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { printf("%3.2f\n",y_outputFromGpu[i]); fail++; } } fprintf(stderr, "%s\n", (fail > 0 ? 
"FAILED (GPU)" : "PASSED (GPU)")); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); // printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE *ca, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { // double t_start, t_end; DATA_TYPE *ca_gpu; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; hipMalloc((void **)&ca_gpu, sizeof(DATA_TYPE) * N * N * FIELDS); hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); hipMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); hipMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); hipMemcpy(ca_gpu, ca, sizeof(DATA_TYPE) * N * N * FIELDS, hipMemcpyHostToDevice); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, hipMemcpyHostToDevice); hipMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); hipMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); double t = mysecond(); #ifdef DEFAULT hipLaunchKernelGGL(( gesummv_kernel), dim3(grid), dim3(block), 0, 0, A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); #endif #ifdef CA hipLaunchKernelGGL(( gesummv_ca_kernel), dim3(grid), dim3(block), 0, 0, ca_gpu,x_gpu, y_gpu, tmp_gpu); #endif hipDeviceSynchronize(); t = 1.0E6 * (mysecond() - t); fprintf(stdout, "%3.2f\n", t/1000); hipMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, hipMemcpyDeviceToHost); } int main(int argc, char *argv[]) { DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; DATA_TYPE *ca; data_item *aos; ca = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE) * FIELDS); aos = (data_item *) malloc(ITEMS*sizeof(data_item)); A = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); init(A, x); for (int i = 0; i < ITEMS; i++) { aos[i].a = A[i]; aos[i].b = B[i]; } convert_aos_to_ca(aos, ca, ITEMS, FIELDS, SPARSITY); check_ca_conversion(A, ca, ITEMS, FIELDS, SPARSITY); // convert_aos_to_ca(aos, ca); #ifdef DEBUG check_ca_conversion(A, ca); #endif GPU_argv_init(); gesummvCuda(ca, A, B, x, y, tmp, y_outputFromGpu); gesummv(A, B, x, y, tmp); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; }
cededec9c8507840a929ef4893dd30160148908d.cu
/** * gesummv.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * scalar, vector and matrix multiplication * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <unistd.h> #include <stdio.h> #include <time.h> #include <sys/time.h> #include <stdlib.h> #include <stdarg.h> #include <string.h> #include <cuda.h> #include <polybenchUtilFuncts.h> #include <util.h> #include <ca.h> //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 #define GPU_DEVICE 0 /* Problem size */ #define N 4096 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 256 #define DIM_THREAD_BLOCK_Y 1 /* Declared constant values for ALPHA and BETA (same as values in PolyBench 2.0) */ #define ALPHA 43532.0f #define BETA 12313.0f /* ****************************************************************** * * CA Specific Definitions * Need to develop a method to extract this information from source * * ******************************************************************/ #define ITEMS N * N #define FIELDS 2 typedef struct data_item_type { DATA_TYPE a; DATA_TYPE b; } data_item; #define SPARSITY N void gesummv(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i, j; for (i = 0; i < N; i++) { tmp[i] = 0; y[i] = 0; for (j = 0; j < N; j++) { tmp[i] = A[i*N + j] * x[j] + tmp[i]; y[i] = B[i*N + j] * x[j] + y[i]; #ifdef DEBUG if (i < 4) printf("%d\t", i * N + j); #endif } #ifdef DEBUG if (i < 4) printf("\n"); #endif y[i] = ALPHA * tmp[i] + BETA * y[i]; } } __global__ void gesummv_ca_kernel(DATA_TYPE *ca, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; int index = (i * FIELDS) - (i % TILE) * (FIELDS - 1); for(j = 0; j < SPARSITY; j++) { tmp[i] += ca[index + (j * SPARSITY * FIELDS)] * x[j]; y[i] += ca[index + (j * SPARSITY * FIELDS) + TILE] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void init(DATA_TYPE* A, DATA_TYPE* x) { int i, j; for (i = 0; i < N; i++) { x[i] = ((DATA_TYPE) i) / N; for (j = 0; j < N; j++) { A[i*N + j] = ((DATA_TYPE) i*j) / N; } } } void compareResults(DATA_TYPE* y, DATA_TYPE* y_outputFromGpu) { int i, fail; fail = 0; for (i=0; i<(N); i++) { if (percentDiff(y[i], y_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD) { printf("%3.2f\n",y_outputFromGpu[i]); fail++; } } fprintf(stderr, "%s\n", (fail > 0 ? 
"FAILED (GPU)" : "PASSED (GPU)")); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); // printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void gesummv_kernel(DATA_TYPE *a, DATA_TYPE *b, DATA_TYPE *x, DATA_TYPE *y, DATA_TYPE *tmp) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { int j; for(j = 0; j < N; j++) { tmp[i] += a[i * N + j] * x[j]; y[i] += b[i * N + j] * x[j]; } y[i] = ALPHA * tmp[i] + BETA * y[i]; } } void gesummvCuda(DATA_TYPE *ca, DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* x, DATA_TYPE* y, DATA_TYPE* tmp, DATA_TYPE* y_outputFromGpu) { // double t_start, t_end; DATA_TYPE *ca_gpu; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *x_gpu; DATA_TYPE *y_gpu; DATA_TYPE *tmp_gpu; cudaMalloc((void **)&ca_gpu, sizeof(DATA_TYPE) * N * N * FIELDS); cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * N * N); cudaMalloc((void **)&x_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&y_gpu, sizeof(DATA_TYPE) * N); cudaMalloc((void **)&tmp_gpu, sizeof(DATA_TYPE) * N); cudaMemcpy(ca_gpu, ca, sizeof(DATA_TYPE) * N * N * FIELDS, cudaMemcpyHostToDevice); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice); cudaMemcpy(x_gpu, x, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(y_gpu, y, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); cudaMemcpy(tmp_gpu, tmp, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), 1); double t = mysecond(); #ifdef DEFAULT gesummv_kernel<<< grid, block>>>(A_gpu,B_gpu,x_gpu, y_gpu, tmp_gpu); #endif #ifdef CA gesummv_ca_kernel<<<grid, block>>>(ca_gpu,x_gpu, y_gpu, tmp_gpu); #endif cudaThreadSynchronize(); t = 1.0E6 * (mysecond() - t); fprintf(stdout, "%3.2f\n", t/1000); cudaMemcpy(y_outputFromGpu, y_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost); } int main(int argc, char *argv[]) { DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* x; DATA_TYPE* y; DATA_TYPE* y_outputFromGpu; DATA_TYPE* tmp; DATA_TYPE *ca; data_item *aos; ca = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE) * FIELDS); aos = (data_item *) malloc(ITEMS*sizeof(data_item)); A = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(ITEMS*sizeof(DATA_TYPE)); x = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); y_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); tmp = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE)); init(A, x); for (int i = 0; i < ITEMS; i++) { aos[i].a = A[i]; aos[i].b = B[i]; } convert_aos_to_ca(aos, ca, ITEMS, FIELDS, SPARSITY); check_ca_conversion(A, ca, ITEMS, FIELDS, SPARSITY); // convert_aos_to_ca(aos, ca); #ifdef DEBUG check_ca_conversion(A, ca); #endif GPU_argv_init(); gesummvCuda(ca, A, B, x, y, tmp, y_outputFromGpu); gesummv(A, B, x, y, tmp); compareResults(y, y_outputFromGpu); free(A); free(B); free(x); free(y); free(y_outputFromGpu); free(tmp); return 0; }
d266adb5c9acbda7d506c0372985895000bf072e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , const float * i3_data, int i3_str_0 , const float * i4_data, int i4_str_0 , const float * i5_data, int i5_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = ii_i1_data[0] + ii_i2_data[0]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - ii_i0_data[0]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , const float * i3_data, int i3_str_0, int i3_str_1 , const float * i4_data, int i4_str_0, int i4_str_1 , const float * i5_data, int i5_str_0, int i5_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i0_data += pos1 * i0_str_1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = ii_i1_data[0] + ii_i2_data[0]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - ii_i0_data[0]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , const float * i3_data , const float * i4_data , const float * i5_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = i1_data[i] + i2_data[i]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - i0_data[i]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = i0_data[i] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } o0_data[i] = o0_i; } } static void can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str, const float * i5_data, const int * i5_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[6][2]; int local_ostr[1][2]; int nd_collapse = 2; for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[3][i]=i3_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[4][i]=i4_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[5][i]=i5_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<6;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<6;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_0[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[0], nd_collapse_0); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_0[i]==0) nd_collapse_[i]=0; } int nd_collapse_1[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove 
stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[3][i-1]=local_str[3][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[3][j-1]=local_str[3][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[4][i-1]=local_str[4][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[4][j-1]=local_str[4][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[5][i-1]=local_str[5][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[5][j-1]=local_str[5][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned 
int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_1), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = ::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = ::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = ::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); hipLaunchKernelGGL(( kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_2), dim3(n_blocks), dim3(threads_per_block), 0, 0, numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; hipError_t err = hipGetLastError(); if( hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", hipGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230 { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V9; PyObject* storage_V11; PyObject* storage_V13; PyObject* storage_V1; __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V13, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V9); Py_XINCREF(storage_V11); Py_XINCREF(storage_V13); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V9 = storage_V9; this->storage_V11 = storage_V11; this->storage_V13 = storage_V13; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_9: double __DUMMY_9; __label_11: double __DUMMY_11; __label_13: double __DUMMY_13; __label_16: double __DUMMY_16; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V9); Py_XDECREF(this->storage_V11); Py_XDECREF(this->storage_V13); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; PyObject* py_V9; CudaNdarray * V9; PyObject* py_V11; CudaNdarray * V11; PyObject* py_V13; CudaNdarray * V13; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. 
if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V7)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n"; //std::cerr << "c_extract " << V7->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V7)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "bcast check 0 passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { py_V9 = PyList_GET_ITEM(storage_V9, 0); {Py_XINCREF(py_V9);} assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V9)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); V9 = (CudaNdarray*)py_V9; //std::cerr << "c_extract " << V9 << '\n'; if (V9->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V9->nd); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V9)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V9)[0], 0); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V9 << "checking bcast 0 <" << V9->str<< ">\n"; //std::cerr << "c_extract " << V9->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V9)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V9)[0], 0); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V9)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V9)[1], 1); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V9 << "checking bcast 1 <" << V9->str<< ">\n"; //std::cerr << "c_extract " << V9->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V9)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V9)[1], 1); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "bcast check 1 passed\n"; assert(V9); Py_INCREF(py_V9); } else if (py_V9 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract done " << V9 << '\n'; { py_V11 = PyList_GET_ITEM(storage_V11, 0); {Py_XINCREF(py_V11);} assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V11)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); V11 = (CudaNdarray*)py_V11; //std::cerr << "c_extract " << V11 << '\n'; if (V11->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V11->nd); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V11)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V11)[0], 0); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V11 << "checking bcast 0 <" << V11->str<< ">\n"; //std::cerr << "c_extract " << V11->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V11)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V11)[0], 0); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V11)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V11)[1], 1); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V11 << "checking bcast 1 <" << V11->str<< ">\n"; //std::cerr << "c_extract " << V11->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V11)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V11)[1], 1); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "bcast check 1 passed\n"; assert(V11); Py_INCREF(py_V11); } else if (py_V11 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract done " << V11 << '\n'; { py_V13 = PyList_GET_ITEM(storage_V13, 0); {Py_XINCREF(py_V13);} assert(py_V13->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. 
if (CudaNdarray_Check(py_V13)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); V13 = (CudaNdarray*)py_V13; //std::cerr << "c_extract " << V13 << '\n'; if (V13->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V13->nd); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V13)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V13)[0], 0); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V13 << "checking bcast 0 <" << V13->str<< ">\n"; //std::cerr << "c_extract " << V13->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V13)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V13)[0], 0); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V13)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V13)[1], 1); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V13 << "checking bcast 1 <" << V13->str<< ">\n"; //std::cerr << "c_extract " << V13->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V13)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V13)[1], 1); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "bcast check 1 passed\n"; assert(V13); Py_INCREF(py_V13); } else if (py_V13 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract done " << V13 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {0, 0}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {1, 0}; int broadcasts_V9[2] = {1, 1}; int broadcasts_V11[2] = {1, 1}; int broadcasts_V13[2] = {1, 1}; //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V9\n"; if (2 != V9->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V9->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i]; if ((!(broadcasts_V9[i] && CudaNdarray_HOST_DIMS(V9)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V9)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V9 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 3 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V9)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V11\n"; if (2 != V11->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V11->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? 
CudaNdarray_HOST_DIMS(V11)[i] : dims[i]; if ((!(broadcasts_V11[i] && CudaNdarray_HOST_DIMS(V11)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V11)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V11 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 4 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V11)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V13\n"; if (2 != V13->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V13->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V13)[i] : dims[i]; if ((!(broadcasts_V13[i] && CudaNdarray_HOST_DIMS(V13)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V13)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V13 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 5 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V13)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } Py_XDECREF(V1); V1 = V3; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 0, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9) , CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11) , CudaNdarray_DEV_DATA(V13), CudaNdarray_HOST_STRIDES(V13) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} END\n"; __label_15: double __DUMMY_15; } __label_14: //std::cerr << "cleanup " << py_V13 << " " << V13 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); if (V13) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V13, (V13->ob_refcnt)); Py_XDECREF(V13); } //std::cerr << "cleanup done" << py_V13 << "\n"; {Py_XDECREF(py_V13);} double __DUMMY_14; } __label_12: //std::cerr << "cleanup " << py_V11 << " " << V11 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); if (V11) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt)); Py_XDECREF(V11); } //std::cerr << "cleanup done" << py_V11 << "\n"; {Py_XDECREF(py_V11);} double __DUMMY_12; } __label_10: //std::cerr << "cleanup " << py_V9 << " " << V9 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); if (V9) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt)); Py_XDECREF(V9); } //std::cerr << "cleanup done" << py_V9 << "\n"; {Py_XDECREF(py_V9);} double __DUMMY_10; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage 
Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. return __failure; } }; } static int __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_executor(__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230* self) { return self->run(); } static void __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_destructor(void* executor, void* self) { delete ((__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (8 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 8, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230* struct_ptr = new __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6),PyTuple_GET_ITEM(argtuple, 7) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_executor), struct_ptr, __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init9b0fd5da0fea7d8a4504cbf7bb8a5230(void){ (void) Py_InitModule("9b0fd5da0fea7d8a4504cbf7bb8a5230", MyMethods); }
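// -----------------------------------------------------------------------
// Hedged sketch of the launch-configuration heuristic used by the switch in
// callkernel_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 above: fill one warp
// (32 threads) first, then spread the work over at most 30 blocks (one per
// multiprocessor assumed by the generator), and only then grow the block
// size toward NUM_VECTOR_OP_THREADS_PER_BLOCK. The helper name
// pick_launch_config is hypothetical and exists only for illustration; the
// constants 32, 30 and NUM_VECTOR_OP_THREADS_PER_BLOCK are the ones already
// used by the generated code above.
static void pick_launch_config(unsigned int numEls,
                               int &n_blocks, int &threads_per_block)
{
    if (numEls == 0) { // the generated caller returns before launching
        n_blocks = 0;
        threads_per_block = 0;
        return;
    }
    // first use at least a full warp
    threads_per_block = numEls < 32u ? (int)numEls : 32;
    // next start adding multiprocessors (up to 30)
    unsigned int blocks_needed = numEls / (unsigned int)threads_per_block
                                 + (numEls % (unsigned int)threads_per_block ? 1 : 0);
    n_blocks = blocks_needed < 30u ? (int)blocks_needed : 30;
    // next start adding more warps per multiprocessor, capped at the
    // per-block thread limit
    if ((unsigned int)(threads_per_block * n_blocks) < numEls) {
        unsigned int t = numEls / (unsigned int)n_blocks;
        if (t > (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK)
            t = (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK;
        threads_per_block = (int)t;
    }
}
// -----------------------------------------------------------------------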
d266adb5c9acbda7d506c0372985895000bf072e.cu
#include <Python.h> #include <iostream> #include "theano_mod_helper.h" #include "cuda_ndarray.cuh" ////////////////////// //// Support Code ////////////////////// #define INTDIV_POW2(a, b) (a >> b) #define INTMOD_POW2(a, b) (a & ((1<<b)-1)) // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_1(unsigned int numEls , const int dim0 , const float * i0_data, int i0_str_0 , const float * i1_data, int i1_str_0 , const float * i2_data, int i2_str_0 , const float * i3_data, int i3_str_0 , const float * i4_data, int i4_str_0 , const float * i5_data, int i5_str_0 , float * o0_data, int o0_str_0 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = ii_i1_data[0] + ii_i2_data[0]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - ii_i0_data[0]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_2(unsigned int numEls , const int dim0, const int dim1 , const float * i0_data, int i0_str_0, int i0_str_1 , const float * i1_data, int i1_str_0, int i1_str_1 , const float * i2_data, int i2_str_0, int i2_str_1 , const float * i3_data, int i3_str_0, int i3_str_1 , const float * i4_data, int i4_str_0, int i4_str_1 , const float * i5_data, int i5_str_0, int i5_str_1 , float * o0_data, int o0_str_0, int o0_str_1 ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { int ii = i; const float * ii_i0_data = i0_data; const float * ii_i1_data = i1_data; const float * ii_i2_data = i2_data; float * ii_o0_data = o0_data; int pos1 = ii % dim1; ii = ii / dim1; ii_i0_data += pos1 * i0_str_1; ii_i1_data += pos1 * i1_str_1; ii_i2_data += pos1 * i2_str_1; ii_o0_data += pos1 * o0_str_1; int pos0 = ii; ii_i0_data += pos0 * i0_str_0; ii_i1_data += pos0 * i1_str_0; ii_i2_data += pos0 * i2_str_0; ii_o0_data += pos0 * o0_str_0; npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = ii_i1_data[0] + ii_i2_data[0]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - ii_i0_data[0]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = ii_i0_data[0] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } ii_o0_data[0] = o0_i; } } // GpuElemwise{Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))}}[(0, 0)] // node.op.destroy_map={0: [0]} // Input 0 CudaNdarrayType(float32, matrix) // Input 1 CudaNdarrayType(float32, matrix) // Input 2 CudaNdarrayType(float32, row) // Input 3 CudaNdarrayType(float32, (True, True)) // Input 4 CudaNdarrayType(float32, (True, True)) // Input 5 CudaNdarrayType(float32, (True, True)) // Output 0 CudaNdarrayType(float32, matrix) static __global__ void kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous (unsigned int numEls , const float * i0_data , const float * i1_data , const float * i2_data , const float * i3_data , const float * i4_data , const float * i5_data , float * o0_data ) { const int idx = blockIdx.x * blockDim.x + threadIdx.x; const int numThreads = blockDim.x * gridDim.x; const float ii_i3_value = i3_data[0]; const float ii_i4_value = i4_data[0]; const float ii_i5_value = i5_data[0]; for (int i = idx; i < numEls; i += numThreads) { npy_float32 o0_i; { npy_float32 V_DUMMY_ID__tmp1; { npy_float32 V_DUMMY_ID__0_tmp1; V_DUMMY_ID__0_tmp1 = i1_data[i] + i2_data[i]; npy_float32 V_DUMMY_ID__0_tmp2; V_DUMMY_ID__0_tmp2 = V_DUMMY_ID__0_tmp1 < -88.0f ? 0.0 : V_DUMMY_ID__0_tmp1 > 15.0f ? 1.0f : 1.0f /(1.0f + exp(-V_DUMMY_ID__0_tmp1)); V_DUMMY_ID__tmp1 = V_DUMMY_ID__0_tmp2 < ii_i3_value ? ii_i3_value : V_DUMMY_ID__0_tmp2 > ii_i4_value ? 
ii_i4_value : V_DUMMY_ID__0_tmp2; } npy_float32 V_DUMMY_ID__tmp2; V_DUMMY_ID__tmp2 = ii_i5_value - i0_data[i]; npy_float32 V_DUMMY_ID__tmp3; V_DUMMY_ID__tmp3 = ii_i5_value - V_DUMMY_ID__tmp1; npy_float32 V_DUMMY_ID__tmp4; V_DUMMY_ID__tmp4 = log(V_DUMMY_ID__tmp1); npy_float32 V_DUMMY_ID__tmp5; V_DUMMY_ID__tmp5 = log(V_DUMMY_ID__tmp3); npy_float32 V_DUMMY_ID__tmp6; V_DUMMY_ID__tmp6 = i0_data[i] * V_DUMMY_ID__tmp4; npy_float32 V_DUMMY_ID__tmp7; V_DUMMY_ID__tmp7 = V_DUMMY_ID__tmp2 * V_DUMMY_ID__tmp5; o0_i = V_DUMMY_ID__tmp6 + V_DUMMY_ID__tmp7; } o0_data[i] = o0_i; } } static void can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(int nd, const int * dims, const int * strides, int collapse[]) { //can we collapse dims[i] and dims[i-1] for(int i=nd-1;i>0;i--){ if(strides[i]*dims[i]==strides[i-1]){//the dims nd-1 are not strided again dimension nd collapse[i]=1; }else collapse[i]=0; } } static int callkernel_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(unsigned int numEls, const int d, const int * dims, const float * i0_data, const int * i0_str, const float * i1_data, const int * i1_str, const float * i2_data, const int * i2_str, const float * i3_data, const int * i3_str, const float * i4_data, const int * i4_str, const float * i5_data, const int * i5_str, float * o0_data, const int * o0_str) { numEls = dims[0]*dims[1]*1; int local_dims[2]; int local_str[6][2]; int local_ostr[1][2]; int nd_collapse = 2; for(int i=0;i<2;i++){//init new dim local_dims[i]=dims[i]; } for(int i=0;i<2;i++){//init new strides local_str[0][i]=i0_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[1][i]=i1_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[2][i]=i2_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[3][i]=i3_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[4][i]=i4_str[i]; } for(int i=0;i<2;i++){//init new strides local_str[5][i]=i5_str[i]; } for(int i=0;i<2;i++){//init new strides local_ostr[0][i]=o0_str[i]; } for(int id=0;id<nd_collapse;id++){ bool all_broadcast=true; for(int input_id=0;input_id<6;input_id++){ if(local_str[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } for(int input_id=0;input_id<1;input_id++){ if(local_ostr[input_id][id]!=0 || local_dims[id]!=1) all_broadcast= false; } if(all_broadcast){ for(int j=id+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; for(int input_id=0;input_id<6;input_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_str[input_id][j-1]=local_str[input_id][j]; } } for(int output_id=0;output_id<1;output_id++){ for(int j=id+1;j<nd_collapse;j++){//remove dims i from the array local_ostr[output_id][j-1]=local_ostr[output_id][j]; } } nd_collapse--; id--; } } int nd_collapse_[2] = {1,1}; int nd_collapse_0[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[0], nd_collapse_0); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_0[i]==0) nd_collapse_[i]=0; } int nd_collapse_1[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[1], nd_collapse_1); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_1[i]==0) nd_collapse_[i]=0; } int nd_collapse_2[2] = {1,1}; can_collapse_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(nd_collapse, local_dims, local_str[2], nd_collapse_2); for(int i=0;i<nd_collapse;i++){ if(nd_collapse_2[i]==0) nd_collapse_[i]=0; } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[0][i-1]=local_str[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove 
stride i from the array local_str[0][j-1]=local_str[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[1][i-1]=local_str[1][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[1][j-1]=local_str[1][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[2][i-1]=local_str[2][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[2][j-1]=local_str[2][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[3][i-1]=local_str[3][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[3][j-1]=local_str[3][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[4][i-1]=local_str[4][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[4][j-1]=local_str[4][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_str[5][i-1]=local_str[5][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_str[5][j-1]=local_str[5][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_ostr[0][i-1]=local_ostr[0][i];//set new strides for(int j=i+1;j<nd_collapse;j++)//remove stride i from the array local_ostr[0][j-1]=local_ostr[0][j]; } } for(int i=nd_collapse-1;i>0;i--){ if(nd_collapse_[i]==1){ local_dims[i-1]*=local_dims[i];//set new dims for(int j=i+1;j<nd_collapse;j++)//remove dims i from the array local_dims[j-1]=local_dims[j]; } } for(int i=1, end=nd_collapse;i<end;i++){ if(nd_collapse_[i]==1)nd_collapse--; } if(nd_collapse == 1 && local_str[0][nd_collapse-1]==1 && local_str[1][nd_collapse-1]==1 && local_str[2][nd_collapse-1]==1 && local_ostr[0][nd_collapse-1]==1 ){nd_collapse=0;} if(numEls==0) return 0; switch (nd_collapse==0?0:min(2,nd_collapse)) { case 0: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, o0_data); //std::cerr << "calling callkernel returned\n"; CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, i0_data, i1_data, i2_data, i3_data, i4_data, i5_data, o0_data)"); return -1; } return 0; } break; case 1: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned 
int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_1<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], o0_data, local_ostr[0][0]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], i0_data, local_str[0][0], i1_data, local_str[1][0], i2_data, local_str[2][0], i3_data, local_str[3][0], i4_data, local_str[4][0], i5_data, local_str[5][0], o0_data, local_ostr[0][0])"); return -1; } return 0; } break; case 2: { //first use at least a full warp int threads_per_block = std::min(numEls, (unsigned int)32); //WARP SIZE //next start adding multiprocessors int n_blocks = std::min(numEls/threads_per_block + (numEls % threads_per_block?1:0), (unsigned int)30); // UP TO NUMBER OF MULTIPROCESSORS // next start adding more warps per multiprocessor if (threads_per_block * n_blocks < numEls) threads_per_block = std::min(numEls/n_blocks, (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK); kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_2<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], o0_data, local_ostr[0][0], local_ostr[0][1]); CNDA_THREAD_SYNC; cudaError_t err = cudaGetLastError(); if( cudaSuccess != err) { PyErr_Format(PyExc_RuntimeError, "Cuda error: %s: %s.\n n_blocks=%i threads_per_block=%i\n Call: %s\n", "GpuElemwise node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0 Composite", cudaGetErrorString(err), n_blocks, threads_per_block, "kernel_Composite_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0_Ccontiguous<<<n_blocks, threads_per_block>>>(numEls, local_dims[0], local_dims[1], i0_data, local_str[0][0], local_str[0][1], i1_data, local_str[1][0], local_str[1][1], i2_data, local_str[2][0], local_str[2][1], i3_data, local_str[3][0], local_str[3][1], i4_data, local_str[4][0], local_str[4][1], i5_data, local_str[5][0], local_str[5][1], o0_data, local_ostr[0][0], local_ostr[0][1])"); return -1; } return 0; } break; } return -2; } namespace { struct __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230 { PyObject* __ERROR; PyObject* storage_V3; PyObject* storage_V5; PyObject* storage_V7; PyObject* storage_V9; PyObject* storage_V11; PyObject* storage_V13; PyObject* storage_V1; __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230() { // This is only somewhat safe because we: // 1) Are not a virtual class // 2) Do not use any virtual classes in the members // 3) Deal with mostly POD and pointers // If this changes, we would have to revise this, but for // now I am tired of chasing segfaults because // initialization code had an error and some pointer has // a junk value. 
memset(this, 0, sizeof(*this)); } ~__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230(void) { cleanup(); } int init(PyObject* __ERROR, PyObject* storage_V3, PyObject* storage_V5, PyObject* storage_V7, PyObject* storage_V9, PyObject* storage_V11, PyObject* storage_V13, PyObject* storage_V1) { Py_XINCREF(storage_V3); Py_XINCREF(storage_V5); Py_XINCREF(storage_V7); Py_XINCREF(storage_V9); Py_XINCREF(storage_V11); Py_XINCREF(storage_V13); Py_XINCREF(storage_V1); this->storage_V3 = storage_V3; this->storage_V5 = storage_V5; this->storage_V7 = storage_V7; this->storage_V9 = storage_V9; this->storage_V11 = storage_V11; this->storage_V13 = storage_V13; this->storage_V1 = storage_V1; this->__ERROR = __ERROR; return 0; } void cleanup(void) { __label_1: double __DUMMY_1; __label_3: double __DUMMY_3; __label_5: double __DUMMY_5; __label_7: double __DUMMY_7; __label_9: double __DUMMY_9; __label_11: double __DUMMY_11; __label_13: double __DUMMY_13; __label_16: double __DUMMY_16; Py_XDECREF(this->storage_V3); Py_XDECREF(this->storage_V5); Py_XDECREF(this->storage_V7); Py_XDECREF(this->storage_V9); Py_XDECREF(this->storage_V11); Py_XDECREF(this->storage_V13); Py_XDECREF(this->storage_V1); } int run(void) { int __failure = 0; PyObject* py_V1; CudaNdarray * V1; PyObject* py_V3; CudaNdarray * V3; PyObject* py_V5; CudaNdarray * V5; PyObject* py_V7; CudaNdarray * V7; PyObject* py_V9; CudaNdarray * V9; PyObject* py_V11; CudaNdarray * V11; PyObject* py_V13; CudaNdarray * V13; { py_V1 = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} if (py_V1 == Py_None) { V1 = NULL; } else { assert(py_V1->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V1)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); V1 = (CudaNdarray*)py_V1; //std::cerr << "c_extract " << V1 << '\n'; if (V1->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V1->nd); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract " << V1 << " nd check passed\n"; assert(V1); Py_INCREF(py_V1); } else if (py_V1 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V1 = NULL; { __failure = 2; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_2;}; } //std::cerr << "c_extract done " << V1 << '\n'; } { py_V3 = PyList_GET_ITEM(storage_V3, 0); {Py_XINCREF(py_V3);} assert(py_V3->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. 
if (CudaNdarray_Check(py_V3)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); V3 = (CudaNdarray*)py_V3; //std::cerr << "c_extract " << V3 << '\n'; if (V3->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V3->nd); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract " << V3 << " nd check passed\n"; assert(V3); Py_INCREF(py_V3); } else if (py_V3 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V3 = NULL; { __failure = 4; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_4;}; } //std::cerr << "c_extract done " << V3 << '\n'; { py_V5 = PyList_GET_ITEM(storage_V5, 0); {Py_XINCREF(py_V5);} assert(py_V5->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V5)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); V5 = (CudaNdarray*)py_V5; //std::cerr << "c_extract " << V5 << '\n'; if (V5->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V5->nd); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract " << V5 << " nd check passed\n"; assert(V5); Py_INCREF(py_V5); } else if (py_V5 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V5 = NULL; { __failure = 6; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_6;}; } //std::cerr << "c_extract done " << V5 << '\n'; { py_V7 = PyList_GET_ITEM(storage_V7, 0); {Py_XINCREF(py_V7);} assert(py_V7->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V7)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); V7 = (CudaNdarray*)py_V7; //std::cerr << "c_extract " << V7 << '\n'; if (V7->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V7->nd); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V7)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V7 << "checking bcast 0 <" << V7->str<< ">\n"; //std::cerr << "c_extract " << V7->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V7)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V7)[0], 0); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract " << V7 << "bcast check 0 passed\n"; assert(V7); Py_INCREF(py_V7); } else if (py_V7 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V7 = NULL; { __failure = 8; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_8;}; } //std::cerr << "c_extract done " << V7 << '\n'; { py_V9 = PyList_GET_ITEM(storage_V9, 0); {Py_XINCREF(py_V9);} assert(py_V9->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V9)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); V9 = (CudaNdarray*)py_V9; //std::cerr << "c_extract " << V9 << '\n'; if (V9->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V9->nd); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V9)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V9)[0], 0); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V9 << "checking bcast 0 <" << V9->str<< ">\n"; //std::cerr << "c_extract " << V9->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V9)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V9)[0], 0); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V9)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V9)[1], 1); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V9 << "checking bcast 1 <" << V9->str<< ">\n"; //std::cerr << "c_extract " << V9->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V9)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V9)[1], 1); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract " << V9 << "bcast check 1 passed\n"; assert(V9); Py_INCREF(py_V9); } else if (py_V9 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V9 = NULL; { __failure = 10; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_10;}; } //std::cerr << "c_extract done " << V9 << '\n'; { py_V11 = PyList_GET_ITEM(storage_V11, 0); {Py_XINCREF(py_V11);} assert(py_V11->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. if (CudaNdarray_Check(py_V11)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); V11 = (CudaNdarray*)py_V11; //std::cerr << "c_extract " << V11 << '\n'; if (V11->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V11->nd); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V11)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V11)[0], 0); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V11 << "checking bcast 0 <" << V11->str<< ">\n"; //std::cerr << "c_extract " << V11->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V11)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V11)[0], 0); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V11)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V11)[1], 1); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V11 << "checking bcast 1 <" << V11->str<< ">\n"; //std::cerr << "c_extract " << V11->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V11)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V11)[1], 1); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract " << V11 << "bcast check 1 passed\n"; assert(V11); Py_INCREF(py_V11); } else if (py_V11 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V11 = NULL; { __failure = 12; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_12;}; } //std::cerr << "c_extract done " << V11 << '\n'; { py_V13 = PyList_GET_ITEM(storage_V13, 0); {Py_XINCREF(py_V13);} assert(py_V13->ob_refcnt >= 2); // There should be at least one ref from the container object, // and one ref from the local scope. 
if (CudaNdarray_Check(py_V13)) { //fprintf(stderr, "c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); V13 = (CudaNdarray*)py_V13; //std::cerr << "c_extract " << V13 << '\n'; if (V13->nd != 2) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has rank %i, it was supposed to have rank 2", V13->nd); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << " nd check passed\n"; if (CudaNdarray_HOST_DIMS(V13)[0] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V13)[0], 0); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "dim check 0 passed\n"; //std::cerr << "c_extract " << V13 << "checking bcast 0 <" << V13->str<< ">\n"; //std::cerr << "c_extract " << V13->str[0] << "\n"; if (CudaNdarray_HOST_STRIDES(V13)[0]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V13)[0], 0); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "bcast check 0 passed\n"; if (CudaNdarray_HOST_DIMS(V13)[1] != 1) { PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has dim %i on broadcastable dimension %i", CudaNdarray_HOST_DIMS(V13)[1], 1); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "dim check 1 passed\n"; //std::cerr << "c_extract " << V13 << "checking bcast 1 <" << V13->str<< ">\n"; //std::cerr << "c_extract " << V13->str[1] << "\n"; if (CudaNdarray_HOST_STRIDES(V13)[1]) { //std::cerr << "c_extract bad stride detected...\n"; PyErr_Format(PyExc_RuntimeError, "c_extract: Some CudaNdarray has a nonzero stride %i on a broadcastable dimension %i", CudaNdarray_HOST_STRIDES(V13)[1], 1); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract " << V13 << "bcast check 1 passed\n"; assert(V13); Py_INCREF(py_V13); } else if (py_V13 == Py_None) { PyErr_SetString(PyExc_TypeError, "expected a CudaNdarray, not None"); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_14;}; } else { //fprintf(stderr, "FAILING c_extract CNDA object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); PyErr_SetString(PyExc_TypeError, "Argument not a CudaNdarray"); V13 = NULL; { __failure = 14; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_14;}; } //std::cerr << "c_extract done " << V13 << '\n'; { // Op class GpuElemwise //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} START\n"; //standard elemwise size checks int dims[2] = {1,1}; int broadcasts_V3[2] = {0, 0}; int broadcasts_V5[2] = {0, 0}; int broadcasts_V7[2] = {1, 0}; int broadcasts_V9[2] = {1, 1}; int broadcasts_V11[2] = {1, 1}; int broadcasts_V13[2] = {1, 1}; //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V3\n"; if (2 != V3->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V3->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V3)[i] : dims[i]; if ((!(broadcasts_V3[i] && CudaNdarray_HOST_DIMS(V3)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V3)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V3 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 0 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V3)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V5\n"; if (2 != V5->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V5->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V5)[i] : dims[i]; if ((!(broadcasts_V5[i] && CudaNdarray_HOST_DIMS(V5)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V5)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V5 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 1 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V5)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V7\n"; if (2 != V7->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V7->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V7)[i] : dims[i]; if ((!(broadcasts_V7[i] && CudaNdarray_HOST_DIMS(V7)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V7)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V7 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 2 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V7)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V9\n"; if (2 != V9->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V9->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V9)[i] : dims[i]; if ((!(broadcasts_V9[i] && CudaNdarray_HOST_DIMS(V9)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V9)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V9 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 3 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V9)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V11\n"; if (2 != V11->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V11->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? 
CudaNdarray_HOST_DIMS(V11)[i] : dims[i]; if ((!(broadcasts_V11[i] && CudaNdarray_HOST_DIMS(V11)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V11)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V11 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 4 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V11)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V13\n"; if (2 != V13->nd) { PyErr_Format(PyExc_TypeError, "need 2 dims, not %i", V13->nd); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } for (int i = 0; i< 2; ++i) { dims[i] = (dims[i] == 1) ? CudaNdarray_HOST_DIMS(V13)[i] : dims[i]; if ((!(broadcasts_V13[i] && CudaNdarray_HOST_DIMS(V13)[i] == 1)) && (dims[i] != CudaNdarray_HOST_DIMS(V13)[i])) { //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} checking input V13 failed\n"; PyErr_Format(PyExc_ValueError, "GpuElemwise. Input dimension mis-match. Input" " 5 (indices start at 0) has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V13)[i], dims[i] ); { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } } Py_XDECREF(V1); V1 = V3; Py_INCREF(V1); for (int i = 0; (i< 2) && (V1); ++i) { if (dims[i] != CudaNdarray_HOST_DIMS(V1)[i]) { PyErr_Format(PyExc_ValueError, "GpuElemwise. Output dimension mis-match. Output" " 0 (indices start at 0), working inplace" " on input 0, has shape[%i] == %i" ", but the output's size on that axis is %i.", i, CudaNdarray_HOST_DIMS(V1)[i], dims[i] ); Py_DECREF(V1); V1 = NULL; { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. 
" "No Python exception was set."); } goto __label_15;}; } } //std::cerr << "ELEMWISE NEW V1 nd" << V1->nd << "\n"; //std::cerr << "ELEMWISE NEW V1 data" << V1->devdata << "\n"; { //new block so that failure gotos don't skip over variable initialization //std::cerr << "calling callkernel\n"; if (callkernel_node_9b0fd5da0fea7d8a4504cbf7bb8a5230_0(1, 0, dims , CudaNdarray_DEV_DATA(V3), CudaNdarray_HOST_STRIDES(V3) , CudaNdarray_DEV_DATA(V5), CudaNdarray_HOST_STRIDES(V5) , CudaNdarray_DEV_DATA(V7), CudaNdarray_HOST_STRIDES(V7) , CudaNdarray_DEV_DATA(V9), CudaNdarray_HOST_STRIDES(V9) , CudaNdarray_DEV_DATA(V11), CudaNdarray_HOST_STRIDES(V11) , CudaNdarray_DEV_DATA(V13), CudaNdarray_HOST_STRIDES(V13) , CudaNdarray_DEV_DATA(V1), CudaNdarray_HOST_STRIDES(V1) )) { // error Py_DECREF(V1); V1 = NULL; { __failure = 15; if (!PyErr_Occurred()) { PyErr_SetString(PyExc_RuntimeError, "Unexpected error in an Op's C code. " "No Python exception was set."); } goto __label_15;}; } else // no error { } } //std::cerr << "C_CODE Composite{((i0 * log(Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4))) + ((i5 - i0) * log((i5 - Composite{clip(scalar_sigmoid((i0 + i1)), i2, i3)}(i1, i2, i3, i4)))))} END\n"; __label_15: double __DUMMY_15; } __label_14: //std::cerr << "cleanup " << py_V13 << " " << V13 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V13, (py_V13->ob_refcnt)); if (V13) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V13, (V13->ob_refcnt)); Py_XDECREF(V13); } //std::cerr << "cleanup done" << py_V13 << "\n"; {Py_XDECREF(py_V13);} double __DUMMY_14; } __label_12: //std::cerr << "cleanup " << py_V11 << " " << V11 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V11, (py_V11->ob_refcnt)); if (V11) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V11, (V11->ob_refcnt)); Py_XDECREF(V11); } //std::cerr << "cleanup done" << py_V11 << "\n"; {Py_XDECREF(py_V11);} double __DUMMY_12; } __label_10: //std::cerr << "cleanup " << py_V9 << " " << V9 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V9, (py_V9->ob_refcnt)); if (V9) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V9, (V9->ob_refcnt)); Py_XDECREF(V9); } //std::cerr << "cleanup done" << py_V9 << "\n"; {Py_XDECREF(py_V9);} double __DUMMY_10; } __label_8: //std::cerr << "cleanup " << py_V7 << " " << V7 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V7, (py_V7->ob_refcnt)); if (V7) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V7, (V7->ob_refcnt)); Py_XDECREF(V7); } //std::cerr << "cleanup done" << py_V7 << "\n"; {Py_XDECREF(py_V7);} double __DUMMY_8; } __label_6: //std::cerr << "cleanup " << py_V5 << " " << V5 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V5, (py_V5->ob_refcnt)); if (V5) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V5, (V5->ob_refcnt)); Py_XDECREF(V5); } //std::cerr << "cleanup done" << py_V5 << "\n"; {Py_XDECREF(py_V5);} double __DUMMY_6; } __label_4: //std::cerr << "cleanup " << py_V3 << " " << V3 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V3, (py_V3->ob_refcnt)); if (V3) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V3, (V3->ob_refcnt)); Py_XDECREF(V3); } //std::cerr << "cleanup done" << py_V3 << "\n"; {Py_XDECREF(py_V3);} double __DUMMY_4; } __label_2: if (!__failure) { //std::cerr << "sync\n"; if (NULL == V1) { // failure: sync None to storage 
Py_XDECREF(py_V1); py_V1 = Py_None; Py_INCREF(py_V1); } else { if (py_V1 != (PyObject*)V1) { Py_XDECREF(py_V1); py_V1 = (PyObject*)V1; Py_INCREF(py_V1); } assert(py_V1->ob_refcnt); } PyObject* old = PyList_GET_ITEM(storage_V1, 0); {Py_XINCREF(py_V1);} PyList_SET_ITEM(storage_V1, 0, py_V1); {Py_XDECREF(old);} } //std::cerr << "cleanup " << py_V1 << " " << V1 << "\n"; //fprintf(stderr, "c_cleanup CNDA py_object w refcnt %p %i\n", py_V1, (py_V1->ob_refcnt)); if (V1) { //fprintf(stderr, "c_cleanup CNDA cn_object w refcnt %p %i\n", V1, (V1->ob_refcnt)); Py_XDECREF(V1); } //std::cerr << "cleanup done" << py_V1 << "\n"; {Py_XDECREF(py_V1);} double __DUMMY_2; } if (__failure) { // When there is a failure, this code puts the exception // in __ERROR. PyObject* err_type = NULL; PyObject* err_msg = NULL; PyObject* err_traceback = NULL; PyErr_Fetch(&err_type, &err_msg, &err_traceback); if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);} if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);} if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);} PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0); PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1); PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2); PyList_SET_ITEM(__ERROR, 0, err_type); PyList_SET_ITEM(__ERROR, 1, err_msg); PyList_SET_ITEM(__ERROR, 2, err_traceback); {Py_XDECREF(old_err_type);} {Py_XDECREF(old_err_msg);} {Py_XDECREF(old_err_traceback);} } // The failure code is returned to index what code block failed. return __failure; } }; } static int __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_executor(__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230* self) { return self->run(); } static void __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_destructor(void* executor, void* self) { delete ((__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230*)self); } ////////////////////// //// Functions ////////////////////// static PyObject * instantiate(PyObject * self, PyObject *argtuple) { assert(PyTuple_Check(argtuple)); if (8 != PyTuple_Size(argtuple)){ PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected 8, got %i", (int)PyTuple_Size(argtuple)); return NULL; } __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230* struct_ptr = new __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230(); if (struct_ptr->init( PyTuple_GET_ITEM(argtuple, 0),PyTuple_GET_ITEM(argtuple, 1),PyTuple_GET_ITEM(argtuple, 2),PyTuple_GET_ITEM(argtuple, 3),PyTuple_GET_ITEM(argtuple, 4),PyTuple_GET_ITEM(argtuple, 5),PyTuple_GET_ITEM(argtuple, 6),PyTuple_GET_ITEM(argtuple, 7) ) != 0) { delete struct_ptr; return NULL; } PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&__struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_executor), struct_ptr, __struct_compiled_op_9b0fd5da0fea7d8a4504cbf7bb8a5230_destructor); return thunk; } ////////////////////// //// Module init ////////////////////// static PyMethodDef MyMethods[] = { {"instantiate", instantiate, METH_VARARGS, "undocumented"} , {NULL, NULL, 0, NULL} }; PyMODINIT_FUNC init9b0fd5da0fea7d8a4504cbf7bb8a5230(void){ (void) Py_InitModule("9b0fd5da0fea7d8a4504cbf7bb8a5230", MyMethods); }
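Note on the generated module that ends above: both kernel specialisations pick their launch configuration with the same three-step heuristic (start with one warp, grow to roughly one block per multiprocessor, then widen each block up to the compile-time cap). The helper below is only an illustrative sketch of that heuristic and is not part of the generated file; the cap NUM_VECTOR_OP_THREADS_PER_BLOCK and the figure of 30 multiprocessors are taken from the code above, while the fallback value of 256 is an assumption made only so the sketch is self-contained.

#include <algorithm>

#ifndef NUM_VECTOR_OP_THREADS_PER_BLOCK
#define NUM_VECTOR_OP_THREADS_PER_BLOCK 256 // assumed cap, only for this sketch
#endif

// Reproduces the launch-size heuristic used by the generated kernels above.
static void choose_launch_dims(unsigned int numEls,
                               unsigned int &threads_per_block,
                               unsigned int &n_blocks)
{
    // first use at least a full warp
    threads_per_block = std::min(numEls, 32u);
    // next start adding multiprocessors (up to 30, as in the generated code)
    n_blocks = std::min(numEls / threads_per_block +
                        (numEls % threads_per_block ? 1u : 0u), 30u);
    // next start adding more warps per multiprocessor
    if (threads_per_block * n_blocks < numEls)
        threads_per_block = std::min(numEls / n_blocks,
                                     (unsigned int)NUM_VECTOR_OP_THREADS_PER_BLOCK);
}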
105c37bd90726549c760b867ebfd655c33f6b044.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by root on 23/03/2020. // #include "../Matrix.cuh" #include <iostream> __global__ void matrixAddScalar(double *a, double b, double *c, int cr, int cc){ int x = blockIdx.x * blockDim.x + threadIdx.x; // col int y = blockIdx.y * blockDim.y + threadIdx.y; // row if(x < cc && y < cr){ c[y * cc + x] = a[y * cc + x]+b; } } Matrix Matrix::addScalar(double m){ static double* c; c = (double*) calloc(this->Rows*this->Columns,sizeof(double)); //Define os endereçoes da memória de vídeo double *d_a, *d_c; //Define o tamanho de cada matriz e escalar na memória long aSize = this->Rows*this->Columns*sizeof(double); long cSize = this->Rows*this->Columns*sizeof(double); //Aloca espaço na memória de vídeo hipMalloc((void**)&d_a, aSize); hipMalloc((void**)&d_c, cSize); //Move a matriz e o escalar para a memória de vídeo alocada hipMemcpy(d_a, this->Value, aSize, hipMemcpyHostToDevice); //Define as dimensões dim3 dimBlock(32,32); // 32x32 -> 1024 Threads dim3 dimGrid(this->Rows,this->Columns); //Efetua a multiplicação hipLaunchKernelGGL(( matrixAddScalar), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, m, d_c, this->Rows, this->Columns); //Copia o resultado de volta hipMemcpy(c, d_c, cSize, hipMemcpyDeviceToHost); //Limpa a memória de vídeo hipFree(d_a); hipFree(d_c); //Salva return {this->Columns, this->Rows, c}; }
105c37bd90726549c760b867ebfd655c33f6b044.cu
// // Created by root on 23/03/2020. // #include "../Matrix.cuh" #include <iostream> __global__ void matrixAddScalar(double *a, double b, double *c, int cr, int cc){ int x = blockIdx.x * blockDim.x + threadIdx.x; // col int y = blockIdx.y * blockDim.y + threadIdx.y; // row if(x < cc && y < cr){ c[y * cc + x] = a[y * cc + x]+b; } } Matrix Matrix::addScalar(double m){ static double* c; c = (double*) calloc(this->Rows*this->Columns,sizeof(double)); //Define os endereçoes da memória de vídeo double *d_a, *d_c; //Define o tamanho de cada matriz e escalar na memória long aSize = this->Rows*this->Columns*sizeof(double); long cSize = this->Rows*this->Columns*sizeof(double); //Aloca espaço na memória de vídeo cudaMalloc((void**)&d_a, aSize); cudaMalloc((void**)&d_c, cSize); //Move a matriz e o escalar para a memória de vídeo alocada cudaMemcpy(d_a, this->Value, aSize, cudaMemcpyHostToDevice); //Define as dimensões dim3 dimBlock(32,32); // 32x32 -> 1024 Threads dim3 dimGrid(this->Rows,this->Columns); //Efetua a multiplicação matrixAddScalar<<<dimGrid, dimBlock>>>(d_a, m, d_c, this->Rows, this->Columns); //Copia o resultado de volta cudaMemcpy(c, d_c, cSize, cudaMemcpyDeviceToHost); //Limpa a memória de vídeo cudaFree(d_a); cudaFree(d_c); //Salva return {this->Columns, this->Rows, c}; }
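A side note on the pair above: dimGrid is built as (Rows, Columns) with 32x32 blocks, i.e. one block per matrix element. This stays correct only because of the bounds check inside the kernel, but it launches far more threads than the matrix needs. The fragment below is a hypothetical sketch, not taken from the repository, of the usual ceil-division sizing for the same matrixAddScalar kernel; the helper name launchAddScalar is invented for illustration.

#include <cuda_runtime.h>

// Kernel signature as declared in the files above.
__global__ void matrixAddScalar(double *a, double b, double *c, int cr, int cc);

// Launch just enough 32x32 blocks to cover a rows x cols matrix.
static void launchAddScalar(double *d_a, double scalar, double *d_c, int rows, int cols)
{
    dim3 block(32, 32);
    dim3 grid((cols + block.x - 1) / block.x,
              (rows + block.y - 1) / block.y);
    matrixAddScalar<<<grid, block>>>(d_a, scalar, d_c, rows, cols);
}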
01900982f355062743d79f6783ae269999cb3e51.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "calc_linear_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int objs = 1; int coords = 1; double *x = NULL; hipMalloc(&x, XSIZE*YSIZE); double *out = NULL; hipMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( calc_linear_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, objs,coords,x,out); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( calc_linear_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, objs,coords,x,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( calc_linear_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, objs,coords,x,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
01900982f355062743d79f6783ae269999cb3e51.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "calc_linear_kernel.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int objs = 1; int coords = 1; double *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); double *out = NULL; cudaMalloc(&out, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); calc_linear_kernel<<<gridBlock,threadBlock>>>(objs,coords,x,out); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { calc_linear_kernel<<<gridBlock,threadBlock>>>(objs,coords,x,out); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { calc_linear_kernel<<<gridBlock,threadBlock>>>(objs,coords,x,out); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
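One caveat about the benchmark harness above: the timed loop enqueues 1000 asynchronous launches and reads steady_clock immediately afterwards, with no cudaDeviceSynchronize() before the end timestamp, so the reported microseconds mostly reflect host-side launch overhead rather than kernel execution time. The sketch below is an alternative using CUDA events, not part of the file; timeKernelMs is an invented helper name and the forward declaration of calc_linear_kernel is inferred from the call site above.

#include <cuda_runtime.h>

__global__ void calc_linear_kernel(int objs, int coords, double *x, double *out);

// Times the same 1000-launch loop with CUDA events, so the result includes
// device execution time instead of only host-side enqueue time.
static float timeKernelMs(dim3 grid, dim3 block, int objs, int coords,
                          double *x, double *out)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < 1000; ++i)
        calc_linear_kernel<<<grid, block>>>(objs, coords, x, out);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}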
5c98e7d8d3e3298eeb73a7cda3f96fe7fc444228.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../include/cuda_histogram.h" #define BINS 256 __global__ void histogram(uchar *d_input, int height, int width, uint *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { // atomic function at device with cc 2.1 has a really performance, cc > 3.5 has better performance // see this link, https://devblogs.nvidia.com/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/ atomicAdd(&d_output[d_input[i*width + j]], 1); } } void cudaHistogram(const cv::Mat & input, uint *hist) { hist = new uint[BINS]; // define block size and dim3 block_size(THREAD_MULTIPLE, 8); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input; uint *d_output; hipStream_t stream; CUDA_CALL(hipStreamCreate(&stream)); CUDA_CALL(hipMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(hipMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, hipMemcpyHostToDevice, stream)); CUDA_CALL(hipMalloc(&d_output, sizeof(uint)*BINS)); CUDA_CALL(hipMemset(d_output, 0, sizeof(uint)*BINS)); // calling kernel hipLaunchKernelGGL(( histogram) , dim3(grid_size), dim3(block_size), 0, stream, d_input, input.rows, input.cols, d_output); CUDA_CALL(hipDeviceSynchronize()); CUDA_CALL(hipMemcpy(hist, d_output, sizeof(uint)*BINS, hipMemcpyDeviceToHost)); // resources releasing CUDA_CALL(hipStreamDestroy(stream)); CUDA_CALL(hipFree(d_input)); CUDA_CALL(hipFree(d_output)); }
5c98e7d8d3e3298eeb73a7cda3f96fe7fc444228.cu
#include "../include/cuda_histogram.h" #define BINS 256 __global__ void histogram(uchar *d_input, int height, int width, uint *d_output) { int row = blockDim.y*blockIdx.y + threadIdx.y; int col = blockDim.x*blockIdx.x + threadIdx.x; for (int i = row; i < height; i += blockDim.y*gridDim.y) for (int j = col; j < width; j += blockDim.x*gridDim.x) { // atomic function at device with cc 2.1 has a really performance, cc > 3.5 has better performance // see this link, https://devblogs.nvidia.com/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell/ atomicAdd(&d_output[d_input[i*width + j]], 1); } } void cudaHistogram(const cv::Mat & input, uint *hist) { hist = new uint[BINS]; // define block size and dim3 block_size(THREAD_MULTIPLE, 8); // divide the image into 16 grids, smaller grid do more things, improve performance a lot. dim3 grid_size(input.cols / (4 * block_size.x), input.rows / (4 * block_size.y)); uchar *d_input; uint *d_output; cudaStream_t stream; CUDA_CALL(cudaStreamCreate(&stream)); CUDA_CALL(cudaMalloc(&d_input, sizeof(uchar)*input.cols*input.rows)); CUDA_CALL(cudaMemcpyAsync(d_input, input.data, sizeof(uchar)*input.cols*input.rows, cudaMemcpyHostToDevice, stream)); CUDA_CALL(cudaMalloc(&d_output, sizeof(uint)*BINS)); CUDA_CALL(cudaMemset(d_output, 0, sizeof(uint)*BINS)); // calling kernel histogram <<<grid_size, block_size, 0, stream>>> (d_input, input.rows, input.cols, d_output); CUDA_CALL(cudaDeviceSynchronize()); CUDA_CALL(cudaMemcpy(hist, d_output, sizeof(uint)*BINS, cudaMemcpyDeviceToHost)); // resources releasing CUDA_CALL(cudaStreamDestroy(stream)); CUDA_CALL(cudaFree(d_input)); CUDA_CALL(cudaFree(d_output)); }
7ea723cd309a1c59989c485fd0d7fdd4211f4cb9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hiprand/hiprand_kernel.h> #include <time.h> #include <stdio.h> #include <conio.h> #include <iostream> #include <sstream> #include <sys/stat.h> #include <vector> #include <Windows.h> #include <fstream> using namespace std; //FUNCIONES CPU bool existeFichero(const string& fichero); void setFontSize(int FontSize); void cargarPartida(const string& fichero, int tam_tesela); void tableroAleatorio(vector<vector<int>>& tablero, int dificultad); void nuevaPartida(vector<vector<int>>& tablero, int dificultad, int filas, int columnas, int puntuacion, int tam_tesela); void imprimirTablero(vector<vector<int>>& tablero); void guardarPartida(vector<vector<int>> tablero, string nombre, int filas, int columnas, int dificultad, int puntuacion); bool quedanMovimientosF(vector<vector<int>> tablero); #define TILE_WIDTH 16 //FUNCIONES GPU __global__ void ToyBlast(int *tablero, int filas, int columnas, int fila, int columna, int *puntuacion); __device__ void eliminarPieza(int *tablero, int filas, int columnas, int fila, int columna, int valor_ini, int *cont); __device__ void bombaRotorH(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaRotorV(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaTNT(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaPuzzle(int* tablero, int filas, int columnas, int fila, int columna); int main(int argc, char *argv[]) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); hipDeviceProp_t propiedades; hipGetDeviceProperties(&propiedades, 0); int hilos_max = propiedades.maxThreadsPerBlock; int tam_tesela = TILE_WIDTH; //SI HAY MÁS ARGUMENTOS QUE argv[0] if (argc > 1) { //SI SÓLO HAY UN ARGUMENTO MÁS, SE CARGA EL FICHERO CON ESE NOMBRE if (argc == 2) { cargarPartida(argv[1], tam_tesela); } else { //SI HAY TRES ARGUMENTOS MÁS, SE CARGA LA PARTIDA CON (dificultad, filas, columnas) if (argc == 4) { int dificultad = atoi(argv[1]); int filas = atoi(argv[2]); int columnas = atoi(argv[3]); vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); //SE ALEATORIZA EL TABLERO Y SE INICIA LA PARTIDA tableroAleatorio(tablero, dificultad); nuevaPartida(tablero, dificultad, filas, columnas, 0, tam_tesela); } else { cout << "El archivo debe ejecutarse de una de las tres maneras:\n-Sin argumentos.\n-Con un unico argumento indicando el nombre del fichero a cargar.\n-Con tres argumentos indicando dificultad, filas y columnas del nuevo tablero.\n"; } } } else { bool valido = false; bool nueva = true; string fichero; while (!valido) { cout << "Si desea cargar una partida, introduzca su nombre con la extension (.txt).
Presione enter para iniciar una nueva partida.\n"; getline(cin, fichero); if (fichero != "") { //COMPROBAMOS SI EL FICHERO EXISTE, SI NO VOLVEMOS A PREGUNTAR if (existeFichero(fichero)) { valido = true; nueva = false; } else { cout << "El fichero no existe."; } } else { //NUEVA PARTIDA valido = true; } } if (nueva) { //INICIO NUEVA PARTIDA cout << "Por favor, introduzca el numero de filas.\n"; int filas; //GET FILAS while (!(cin >> filas)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de fila valido.\n"; } cout << "Por favor, introduzca el numero de columnas.\n"; int columnas; //GET COLUMNAS while (!(cin >> columnas)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de columna valido.\n"; } //GET DIFICULTAD int dificultad = -1; while (dificultad != 2 && dificultad != 1) { cout << "Por favor, introduzca la dificultad (1 para dificultad facil y 2 para dificil).\n"; while (!(cin >> dificultad)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de dificultad valido.\n"; } if (dificultad != 2 && dificultad != 1) { cout << "Entrada invalida.\n"; } } //SI EL TABLERO NO CABE EN PANTALLA, SE HACE LA LETRA MS PEQUEA if (columnas > 48) { setFontSize(11); if (columnas > 55) setFontSize(8); if (columnas > 75) setFontSize(6); if (columnas > 90) setFontSize(4); } vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); tableroAleatorio(tablero, dificultad); nuevaPartida(tablero, dificultad, filas, columnas, 0, tam_tesela); } else { cargarPartida(fichero, tam_tesela); } } return 0; } inline bool existeFichero(const string& fichero) { struct stat buffer; return (stat(fichero.c_str(), &buffer) == 0); } void cargarPartida(const string& fichero, int tam_tesela) { //YA SE HA COMPROBADO QUE EL ARCHIVO EXISTE, CARGAR ARCHIVO EXISTENTE vector<int> datavec; ifstream infile; infile.open(fichero, ios::in | ios::binary); while (infile) { int val; infile.read(reinterpret_cast<char *>(&val), sizeof(int)); if (infile.bad()) { throw std::runtime_error("Failed to read from infile!"); } if (infile.eof()) break; datavec.push_back(val); } //FORMATO DEL FICHERO: un vector de enteros con vector[0]=puntuacion, vector[1]=filas, vector[2]=columnas, vector[3]=dificultad, y la matriz en una lista unidimensional de enteros int puntuacion = datavec[0]; datavec.erase(datavec.begin()); int filas = datavec[0]; datavec.erase(datavec.begin()); int dificultad = datavec[0]; datavec.erase(datavec.begin()); int columnas = (datavec.size()) / filas; vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); imprimirTablero(tablero); for (int i = 0; i < filas; i++) { for (int j = 0; j < columnas; j++) { tablero[i][j] = datavec[0]; datavec.erase(datavec.begin()); } } nuevaPartida(tablero, dificultad, filas, columnas, puntuacion, tam_tesela); } void setFontSize(int FontSize) { //PONER LA FUENTE A CIERTO TAMAO CONSOLE_FONT_INFOEX info = { 0 }; info.cbSize = sizeof(info); info.dwFontSize.Y = FontSize; info.FontWeight = FW_NORMAL; wcscpy(info.FaceName, L"Lucida Console"); SetCurrentConsoleFontEx(GetStdHandle(STD_OUTPUT_HANDLE), NULL, &info); } void tableroAleatorio(vector<vector<int>>& tablero, int dificultad) { srand(time(NULL)); //MODIFICA TODOS LOS HUECOS LIBRES DEL TABLERO (QUE SEAN 0) CON BLOQUES ALEATORIOS if (dificultad == 1) { for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < 
tablero[0].size(); ++j) { if (tablero[i][j] == 0) tablero[i][j] = rand() % 5 + 1; } } } else { for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { if (tablero[i][j] == 0) tablero[i][j] = rand() % 6 + 1; } } } } void imprimirTablero(vector<vector<int>>& tablero) { //IMPRIMIR CABECERA cout << "#_____________TABLERO_DE_JUEGO_____________\n\n "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 8 + 15 * 16); for (int i = 0; i < tablero[0].size(); ++i) { if (i % 2 == 0) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 1 + 16 * 8); } else { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15 + 16 * 1); } cout << " "; if (i + 1 < 10) cout << " "; cout << i + 1; if (i + 1 < 100) cout << " "; } cout << "\n"; //IMPRIMIR CADA POSICIN for (int i = 0; i < tablero.size(); ++i) { if (i % 2 == 0) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 1 + 16 * 8); } else { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15 + 16 * 1); } cout << " "; if (i + 1<10) cout << " "; if (i + 1 < 100) cout << " "; cout << i + 1; cout << " "; for (int j = 0; j < tablero[0].size(); ++j) { switch (tablero[i][j]) { case 1:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+9*16); break; case 2:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+12*16); break; case 3:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+5*16); //NO HAY COLOR NARANJA break; case 4:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+2*16); break; case 5:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+6*16); break; case 6:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+14*16); break; case 7:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+10*16); break; case 8:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+10*16); break; case 9:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+11*16); break; default:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+13*16); break; } cout << " "; if (tablero[i][j] < 10) cout << " "; cout << tablero[i][j]; cout << " "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); } cout << "\n"; } } void nuevaPartida(vector<vector<int>>& tablero, int dificultad, int filas, int columnas, int puntuacion, int tam_tesela) { system("CLS"); ShowWindow(GetConsoleWindow(), SW_MAXIMIZE); //IMPRIMIR TABLERO imprimirTablero(tablero); //LEER FILA Y COLUMNA DE LA JUGADA int fila = -1; bool quedanMovimientos = true; while (fila != 0 && quedanMovimientos) { printf("Puntuacion actual: %d,\n", puntuacion); while (fila < 0 || fila > filas) { cout << "Introduce la fila de la pieza a eliminar (0 para salir). 
Los rotores horizontales son:"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 10); cout << " 7"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", los verticales son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 10); cout << " 8"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", las bombas TNT son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 11); cout << " 9"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", las bombas puzzle son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); cout << "1X"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", donde X indica el valor que eliminan.\n"; while (!(cin >> fila)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de fila valido.\n"; } if (fila < 0 || fila > filas) cout << "Fila fuera de rango.\n"; } if (fila != 0) { int columna = 0; while (columna < 1 || columna > columnas) { cout << "Introduce la columna de la pieza a eliminar.\n"; while (!(cin >> columna)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de columna valido.\n"; } if (columna < 1 || columna > columnas) cout << "Columna fuera de rango.\n"; } //FIN LEER FILA Y COLUMNA DE LA JUGADA int mayor = max(filas, columnas); //CUDA int *d_tablero; int *d_puntuacion; //DADO QUE CUDA NO SOPORTA VECTORES, PASAMOS EL VECTOR A ARRAY UNIDIMENSIONAL int* tablero_a = new int[tablero.size()*tablero[0].size()]; for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { tablero_a[i*tablero[0].size() + j] = tablero[i][j]; } } //ALOCAMOS MEMORIA PARA EL TABLERO Y COPIAMOS NUESTRO ARRAY DE CPU A GPU hipMalloc(&d_tablero, (tablero.size()*tablero[0].size()*sizeof(int))); hipMemcpy(d_tablero, tablero_a, (tablero.size()*tablero[0].size()*sizeof(int)), hipMemcpyHostToDevice); hipMalloc(&d_puntuacion, (sizeof(int))); hipMemcpy(d_puntuacion, &puntuacion, sizeof(int), hipMemcpyHostToDevice); int n_bloques = (mayor+tam_tesela-1) / tam_tesela; dim3 DimGrid(n_bloques, n_bloques); dim3 DimBlock(tam_tesela, tam_tesela, 1); ToyBlast << < DimGrid, DimBlock >> > (d_tablero, filas, columnas, fila - 1, columna - 1, d_puntuacion); //UNA VEZ TERMINA, VOLVEMOS A COPIAR EL ARRAY DE GPU A CPU hipMemcpy(tablero_a, d_tablero, tablero.size()*tablero[0].size()*sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&puntuacion, d_puntuacion, sizeof(int), hipMemcpyDeviceToHost); //PASAMOS EL ARRAY A VECTOR for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { tablero[i][j] = tablero_a[i*tablero[0].size() + j]; } } //RELLENAMOS LOS CEROS CON ALEATORIO tableroAleatorio(tablero, dificultad); imprimirTablero(tablero); //LIBERAMOS MEMORIA DE GPU hipFree(d_tablero); hipFree(d_puntuacion); fila = -1; quedanMovimientos = quedanMovimientosF(tablero); } else { cout << "Deseas guardar la partida? Introduzca 0 para no o 1 para si.\n"; int guardar; while (!(cin >> guardar)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un valor valido.\n"; } if (guardar == 1) { string nombre; cout << "Introduzca el nombre de la partida a guardar.\n"; cin >> nombre; guardarPartida(tablero, nombre, filas, columnas, dificultad, puntuacion); } } } if (!quedanMovimientos) { printf("No quedan movimientos posibles. 
Su puntuacion ha sido:\n %d \n GRACIAS POR JUGAR\n", puntuacion); } } void guardarPartida(vector<vector<int>> tablero, string nombre, int filas, int columnas, int dificultad, int puntuacion) { //GUARDAR LA PARTIDA SERIALIZANDO EL VECTOR COMO ARRAY UNIDIMENSIONAL DE ENTEROS ofstream outfile; outfile.open(nombre, ios::out | ios::trunc | ios::binary); outfile.write(reinterpret_cast<const char *>(&puntuacion), sizeof(int)); outfile.write(reinterpret_cast<const char *>(&filas), sizeof(int)); outfile.write(reinterpret_cast<const char *>(&dificultad), sizeof(int)); for (int i = 0; i < tablero.size(); i++) { for (int j = 0; j < tablero[0].size(); j++) { outfile.write(reinterpret_cast<const char *>(&tablero[i][j]), sizeof(int)); if (outfile.bad()) { throw std::runtime_error("Failed to write to outfile!"); } } } } bool quedanMovimientosF(vector<vector<int>> tablero) { //SI NO HAY DOS PIEZAS JUNTAS EN NINGN ESPACIO DEL TABLERO, NO QUEDAN MOVIMIENTOS for (int i = 0; i < tablero.size(); i++) { for (int j = 0; j < tablero[0].size(); j++) { if (tablero[i][j]>6) return true; if ((i>0) && (tablero[i][j] == tablero[i - 1][j])) return true; if (((i + 1)<tablero.size()) && (tablero[i][j] == tablero[i + 1][j])) return true; if ((j>0) && (tablero[i][j] == tablero[i][j - 1])) return true; if (((j + 1)<tablero[0].size()) && (tablero[i][j] == tablero[i][j + 1])) return true; } } return false; } __global__ void ToyBlast(int *tablero, int filas, int columnas, int fila, int columna, int *puntuacion) { int hilo_fila = blockIdx.x*blockDim.x + threadIdx.x; int hilo_columna = blockIdx.y*blockDim.y + threadIdx.y; if (hilo_fila == fila && hilo_columna == columna) { int cont = 0; int valor = tablero[fila*columnas + columna]; eliminarPieza(tablero, filas, columnas, fila, columna, valor, &cont); if (cont < 2) { tablero[fila*columnas + columna] = valor; } else { if (cont > 4) { //crearBomba switch (cont) { case 5: int aleatorio = clock() % 10; if (aleatorio < 5) { tablero[fila*columnas + columna] = 7; //SE CREA ALEATORIAMENTE UN ROTOR HORIZONTAL O VERTICAL } else { tablero[fila*columnas + columna] = 8; } break; case 6: tablero[fila*columnas + columna] = 9; break; default: tablero[fila*columnas + columna] = 10 + valor; //PARA ALMACENAR EL BLOQUE A EXPLOTAR DEL PUZZLE, LA BOMBA SERA DE 11 A 16 EN FUNCIN DEL COLOR break; } } } if (cont>1) *puntuacion = *puntuacion + cont; } __syncthreads(); //SUBIR TODOS LOS CEROS if (hilo_columna < columnas&&hilo_fila < filas) { for (int i = 1; i < filas; i++) { if (tablero[(filas - i)*columnas + hilo_columna] == 0) { if (tablero[(filas - (i + 1))*columnas + hilo_columna] == 0) { int j = i; while (tablero[(filas - (j + 1))*columnas + hilo_columna] == 0 && j < filas) { j++; } tablero[(filas - i)*columnas + hilo_columna] = tablero[(filas - (j + 1))*columnas + hilo_columna]; tablero[(filas - (j + 1))*columnas + hilo_columna] = 0; } else { tablero[(filas - i)*columnas + hilo_columna] = tablero[(filas - (i + 1))*columnas + hilo_columna]; tablero[(filas - (i + 1))*columnas + hilo_columna] = 0; } } __syncthreads(); } } } __device__ void eliminarPieza(int *tablero, int filas, int columnas, int fila, int columna, int valor_ini, int *cont) { //DECLARAMOS BOOLEANOS PARA SABER HACIA DONDE TIENE QUE COMPROBAR SI TIENE QUE ELIMINAR LA PIEZA, SI TIENE QUE COMPROBARLA VOLVEMOS A LLAMAR A eliminarPieza int valor_act = tablero[fila*columnas + columna]; if ((valor_act == valor_ini) && (valor_act<7)) { tablero[fila*columnas + columna] = 0; *cont = *cont + 1; bool arriba = true; bool abajo = true; bool izquierda = 
true; bool derecha = true; if (fila < 1) arriba = false; if (columna < 1) izquierda = false; if (fila + 1 >= filas) abajo = false; if (columna + 1 >= columnas) derecha = false; if (arriba) eliminarPieza(tablero, filas, columnas, fila - 1, columna, valor_ini, cont); if (izquierda) eliminarPieza(tablero, filas, columnas, fila, columna - 1, valor_ini, cont); if (abajo) eliminarPieza(tablero, filas, columnas, fila + 1, columna, valor_ini, cont); if (derecha) eliminarPieza(tablero, filas, columnas, fila, columna + 1, valor_ini, cont); } else { //SI NO COINICIDE EL VALOR, HAY QUE COMPROBAR SI ES UNA BOMBA, PERO SÓLO HAY QUE EXPLOTARLA SI ES LA PRIMERA PIEZA ELIMINADA if ((*cont == 0) && (valor_act>6)) { //ES UNA BOMBA Y HAY QUE EXPLOTARLA *cont = 2; switch (valor_act) { case 7: //BOMBA 5 PIEZAS HORIZONTAL bombaRotorH(tablero, filas, columnas, fila, columna); break; case 8: //BOMBA 5 PIEZAS VERTICAL bombaRotorV(tablero, filas, columnas, fila, columna); break; case 9: //BOMBA 6 PIEZAS bombaTNT(tablero, filas, columnas, fila, columna); break; default://BOMBA 7 PIEZAS bombaPuzzle(tablero, filas, columnas, fila, columna); break; } } else { //ES UNA BOMBA PERO NO HAY QUE EXPLOTARLA, ES ADYACENTE A LAS QUE HAY QUE EXPLOTAR } } } __device__ void bombaRotorH(int* tablero, int filas, int columnas, int fila, int columna) { //BORRAR FILA for (int i = 0; i < columnas; i++) { tablero[fila*columnas + i] = 0; } } __device__ void bombaRotorV(int* tablero, int filas, int columnas, int fila, int columna) { //BORRAR COLUMNA for (int i = 0; i < filas; i++) { tablero[i*columnas + columna] = 0; } } __device__ void bombaTNT(int* tablero, int filas, int columnas, int fila, int columna) { tablero[fila*columnas + columna] = 0; bool arriba = true; bool abajo = true; bool izquierda = true; bool derecha = true; if (fila < 1) arriba = false; if (columna < 1) izquierda = false; if (fila + 1 >= filas) abajo = false; if (columna + 1 >= columnas) derecha = false; if (arriba) tablero[(fila - 1)*columnas + columna] = 0; if (izquierda) tablero[fila*columnas + (columna - 1)] = 0; if (abajo) tablero[(fila + 1)*columnas + columna] = 0; if (derecha) tablero[fila*columnas + (columna + 1)] = 0; if (arriba&&izquierda) tablero[(fila - 1)*columnas + (columna - 1)] = 0; if (arriba&&derecha) tablero[(fila - 1)*columnas + (columna + 1)] = 0; if (abajo&&izquierda) tablero[(fila + 1)*columnas + (columna - 1)] = 0; if (abajo&&derecha) tablero[(fila + 1)*columnas + (columna + 1)] = 0; } __device__ void bombaPuzzle(int* tablero, int filas, int columnas, int fila, int columna) { int valor = tablero[fila*columnas + columna] - 10; tablero[fila*columnas + columna] = 0; for (int i = 0; i < filas; i++) { for (int j = 0; j < columnas; j++) { if (tablero[i*columnas + j] == valor) tablero[i*columnas + j] = 0; } } }
7ea723cd309a1c59989c485fd0d7fdd4211f4cb9.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <curand_kernel.h> #include <time.h> #include <stdio.h> #include <conio.h> #include <iostream> #include <sstream> #include <sys/stat.h> #include <vector> #include <Windows.h> #include <fstream> using namespace std; //FUNCIONES CPU bool existeFichero(const string& fichero); void setFontSize(int FontSize); void cargarPartida(const string& fichero, int tam_tesela); void tableroAleatorio(vector<vector<int>>& tablero, int dificultad); void nuevaPartida(vector<vector<int>>& tablero, int dificultad, int filas, int columnas, int puntuacion, int tam_tesela); void imprimirTablero(vector<vector<int>>& tablero); void guardarPartida(vector<vector<int>> tablero, string nombre, int filas, int columnas, int dificultad, int puntuacion); bool quedanMovimientosF(vector<vector<int>> tablero); #define TILE_WIDTH 16 //FUNCIONES GPU __global__ void ToyBlast(int *tablero, int filas, int columnas, int fila, int columna, int *puntuacion); __device__ void eliminarPieza(int *tablero, int filas, int columnas, int fila, int columna, int valor_ini, int *cont); __device__ void bombaRotorH(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaRotorV(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaTNT(int* tablero, int filas, int columnas, int fila, int columna); __device__ void bombaPuzzle(int* tablero, int filas, int columnas, int fila, int columna); int main(int argc, char *argv[]) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cudaDeviceProp propiedades; cudaGetDeviceProperties(&propiedades, 0); int hilos_max = propiedades.maxThreadsPerBlock; int tam_tesela = TILE_WIDTH; //SI HAY MÁS ARGUMENTOS QUE argv[0] if (argc > 1) { //SI SÓLO HAY UN ARGUMENTO MÁS, SE CARGA EL FICHERO CON ESE NOMBRE if (argc == 2) { cargarPartida(argv[1], tam_tesela); } else { //SI HAY TRES ARGUMENTOS MÁS, SE CARGA LA PARTIDA CON (dificultad, filas, columnas) if (argc == 4) { int dificultad = atoi(argv[1]); int filas = atoi(argv[2]); int columnas = atoi(argv[3]); vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); //SE ALEATORIZA EL TABLERO Y SE INICIA LA PARTIDA tableroAleatorio(tablero, dificultad); nuevaPartida(tablero, dificultad, filas, columnas, 0, tam_tesela); } else { cout << "El archivo debe ejecutarse de una de las tres maneras:\n-Sin argumentos.\n-Con un unico argumento indicando el nombre del fichero a cargar.\n-Con tres argumentos indicando dificultad, filas y columnas del nuevo tablero.\n"; } } } else { bool valido = false; bool nueva = true; string fichero; while (!valido) { cout << "Si desea cargar una partida, introduzca su nombre con la extension (.txt). 
Presione enter para iniciar una nueva partida.\n"; getline(cin, fichero); if (fichero != "") { //COMPROBAMOS SI EL FICHERO EXISTE, SI NO VOLVEMOS A PREGUNTAR if (existeFichero(fichero)) { valido = true; nueva = false; } else { cout << "El fichero no existe."; } } else { //NUEVA PARTIDA valido = true; } } if (nueva) { //INICIO NUEVA PARTIDA cout << "Por favor, introduzca el numero de filas.\n"; int filas; //GET FILAS while (!(cin >> filas)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de fila valido.\n"; } cout << "Por favor, introduzca el numero de columnas.\n"; int columnas; //GET COLUMNAS while (!(cin >> columnas)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de columna valido.\n"; } //GET DIFICULTAD int dificultad = -1; while (dificultad != 2 && dificultad != 1) { cout << "Por favor, introduzca la dificultad (1 para dificultad facil y 2 para dificil).\n"; while (!(cin >> dificultad)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de dificultad valido.\n"; } if (dificultad != 2 && dificultad != 1) { cout << "Entrada invalida.\n"; } } //SI EL TABLERO NO CABE EN PANTALLA, SE HACE LA LETRA MÁS PEQUEÑA if (columnas > 48) { setFontSize(11); if (columnas > 55) setFontSize(8); if (columnas > 75) setFontSize(6); if (columnas > 90) setFontSize(4); } vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); tableroAleatorio(tablero, dificultad); nuevaPartida(tablero, dificultad, filas, columnas, 0, tam_tesela); } else { cargarPartida(fichero, tam_tesela); } } return 0; } inline bool existeFichero(const string& fichero) { struct stat buffer; return (stat(fichero.c_str(), &buffer) == 0); } void cargarPartida(const string& fichero, int tam_tesela) { //YA SE HA COMPROBADO QUE EL ARCHIVO EXISTE, CARGAR ARCHIVO EXISTENTE vector<int> datavec; ifstream infile; infile.open(fichero, ios::in | ios::binary); while (infile) { int val; infile.read(reinterpret_cast<char *>(&val), sizeof(int)); if (infile.bad()) { throw std::runtime_error("Failed to read from infile!"); } if (infile.eof()) break; datavec.push_back(val); } //FORMATO DEL FICHERO: un vector de enteros con vector[0]=puntuacion, vector[1]=filas, vector[2]=columnas, vector[3]=dificultad, y la matriz en una lista unidimensional de enteros int puntuacion = datavec[0]; datavec.erase(datavec.begin()); int filas = datavec[0]; datavec.erase(datavec.begin()); int dificultad = datavec[0]; datavec.erase(datavec.begin()); int columnas = (datavec.size()) / filas; vector<vector<int>> tablero; tablero.resize(filas, vector<int>(columnas, 0)); imprimirTablero(tablero); for (int i = 0; i < filas; i++) { for (int j = 0; j < columnas; j++) { tablero[i][j] = datavec[0]; datavec.erase(datavec.begin()); } } nuevaPartida(tablero, dificultad, filas, columnas, puntuacion, tam_tesela); } void setFontSize(int FontSize) { //PONER LA FUENTE A CIERTO TAMAÑO CONSOLE_FONT_INFOEX info = { 0 }; info.cbSize = sizeof(info); info.dwFontSize.Y = FontSize; info.FontWeight = FW_NORMAL; wcscpy(info.FaceName, L"Lucida Console"); SetCurrentConsoleFontEx(GetStdHandle(STD_OUTPUT_HANDLE), NULL, &info); } void tableroAleatorio(vector<vector<int>>& tablero, int dificultad) { srand(time(NULL)); //MODIFICA TODOS LOS HUECOS LIBRES DEL TABLERO (QUE SEAN 0) CON BLOQUES ALEATORIOS if (dificultad == 1) { for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j 
< tablero[0].size(); ++j) { if (tablero[i][j] == 0) tablero[i][j] = rand() % 5 + 1; } } } else { for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { if (tablero[i][j] == 0) tablero[i][j] = rand() % 6 + 1; } } } } void imprimirTablero(vector<vector<int>>& tablero) { //IMPRIMIR CABECERA cout << "#_____________TABLERO_DE_JUEGO_____________\n\n "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 8 + 15 * 16); for (int i = 0; i < tablero[0].size(); ++i) { if (i % 2 == 0) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 1 + 16 * 8); } else { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15 + 16 * 1); } cout << " "; if (i + 1 < 10) cout << " "; cout << i + 1; if (i + 1 < 100) cout << " "; } cout << "\n"; //IMPRIMIR CADA POSICIÓN for (int i = 0; i < tablero.size(); ++i) { if (i % 2 == 0) { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 1 + 16 * 8); } else { SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15 + 16 * 1); } cout << " "; if (i + 1<10) cout << " "; if (i + 1 < 100) cout << " "; cout << i + 1; cout << " "; for (int j = 0; j < tablero[0].size(); ++j) { switch (tablero[i][j]) { case 1:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+9*16); break; case 2:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+12*16); break; case 3:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+5*16); //NO HAY COLOR NARANJA break; case 4:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+2*16); break; case 5:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+6*16); break; case 6:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+14*16); break; case 7:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+10*16); break; case 8:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+10*16); break; case 9:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+11*16); break; default:SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15+13*16); break; } cout << " "; if (tablero[i][j] < 10) cout << " "; cout << tablero[i][j]; cout << " "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); } cout << "\n"; } } void nuevaPartida(vector<vector<int>>& tablero, int dificultad, int filas, int columnas, int puntuacion, int tam_tesela) { system("CLS"); ShowWindow(GetConsoleWindow(), SW_MAXIMIZE); //IMPRIMIR TABLERO imprimirTablero(tablero); //LEER FILA Y COLUMNA DE LA JUGADA int fila = -1; bool quedanMovimientos = true; while (fila != 0 && quedanMovimientos) { printf("Puntuacion actual: %d,\n", puntuacion); while (fila < 0 || fila > filas) { cout << "Introduce la fila de la pieza a eliminar (0 para salir). 
Los rotores horizontales son:"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 10); cout << " 7"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", los verticales son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 10); cout << " 8"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", las bombas TNT son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 11); cout << " 9"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", las bombas puzzle son: "; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 13); cout << "1X"; SetConsoleTextAttribute(GetStdHandle(STD_OUTPUT_HANDLE), 15); cout << ", donde X indica el valor que eliminan.\n"; while (!(cin >> fila)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de fila valido.\n"; } if (fila < 0 || fila > filas) cout << "Fila fuera de rango.\n"; } if (fila != 0) { int columna = 0; while (columna < 1 || columna > columnas) { cout << "Introduce la columna de la pieza a eliminar.\n"; while (!(cin >> columna)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un numero de columna valido.\n"; } if (columna < 1 || columna > columnas) cout << "Columna fuera de rango.\n"; } //FIN LEER FILA Y COLUMNA DE LA JUGADA int mayor = max(filas, columnas); //CUDA int *d_tablero; int *d_puntuacion; //DADO QUE CUDA NO SOPORTA VECTORES, PASAMOS EL VECTOR A ARRAY UNIDIMENSIONAL int* tablero_a = new int[tablero.size()*tablero[0].size()]; for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { tablero_a[i*tablero[0].size() + j] = tablero[i][j]; } } //ALOCAMOS MEMORIA PARA EL TABLERO Y COPIAMOS NUESTRO ARRAY DE CPU A GPU cudaMalloc(&d_tablero, (tablero.size()*tablero[0].size()*sizeof(int))); cudaMemcpy(d_tablero, tablero_a, (tablero.size()*tablero[0].size()*sizeof(int)), cudaMemcpyHostToDevice); cudaMalloc(&d_puntuacion, (sizeof(int))); cudaMemcpy(d_puntuacion, &puntuacion, sizeof(int), cudaMemcpyHostToDevice); int n_bloques = (mayor+tam_tesela-1) / tam_tesela; dim3 DimGrid(n_bloques, n_bloques); dim3 DimBlock(tam_tesela, tam_tesela, 1); ToyBlast << < DimGrid, DimBlock >> > (d_tablero, filas, columnas, fila - 1, columna - 1, d_puntuacion); //UNA VEZ TERMINA, VOLVEMOS A COPIAR EL ARRAY DE GPU A CPU cudaMemcpy(tablero_a, d_tablero, tablero.size()*tablero[0].size()*sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&puntuacion, d_puntuacion, sizeof(int), cudaMemcpyDeviceToHost); //PASAMOS EL ARRAY A VECTOR for (int i = 0; i < tablero.size(); ++i) { for (int j = 0; j < tablero[0].size(); ++j) { tablero[i][j] = tablero_a[i*tablero[0].size() + j]; } } //RELLENAMOS LOS CEROS CON ALEATORIO tableroAleatorio(tablero, dificultad); imprimirTablero(tablero); //LIBERAMOS MEMORIA DE GPU cudaFree(d_tablero); cudaFree(d_puntuacion); fila = -1; quedanMovimientos = quedanMovimientosF(tablero); } else { cout << "¿Deseas guardar la partida? Introduzca 0 para no o 1 para si.\n"; int guardar; while (!(cin >> guardar)) { cin.clear(); cin.ignore((std::numeric_limits<std::streamsize>::max)(), '\n'); cerr << "Por favor, introduzca un valor valido.\n"; } if (guardar == 1) { string nombre; cout << "Introduzca el nombre de la partida a guardar.\n"; cin >> nombre; guardarPartida(tablero, nombre, filas, columnas, dificultad, puntuacion); } } } if (!quedanMovimientos) { printf("No quedan movimientos posibles. 
Su puntuacion ha sido:\n %d \n GRACIAS POR JUGAR\n", puntuacion); } } void guardarPartida(vector<vector<int>> tablero, string nombre, int filas, int columnas, int dificultad, int puntuacion) { //GUARDAR LA PARTIDA SERIALIZANDO EL VECTOR COMO ARRAY UNIDIMENSIONAL DE ENTEROS ofstream outfile; outfile.open(nombre, ios::out | ios::trunc | ios::binary); outfile.write(reinterpret_cast<const char *>(&puntuacion), sizeof(int)); outfile.write(reinterpret_cast<const char *>(&filas), sizeof(int)); outfile.write(reinterpret_cast<const char *>(&dificultad), sizeof(int)); for (int i = 0; i < tablero.size(); i++) { for (int j = 0; j < tablero[0].size(); j++) { outfile.write(reinterpret_cast<const char *>(&tablero[i][j]), sizeof(int)); if (outfile.bad()) { throw std::runtime_error("Failed to write to outfile!"); } } } } bool quedanMovimientosF(vector<vector<int>> tablero) { //SI NO HAY DOS PIEZAS JUNTAS EN NINGÚN ESPACIO DEL TABLERO, NO QUEDAN MOVIMIENTOS for (int i = 0; i < tablero.size(); i++) { for (int j = 0; j < tablero[0].size(); j++) { if (tablero[i][j]>6) return true; if ((i>0) && (tablero[i][j] == tablero[i - 1][j])) return true; if (((i + 1)<tablero.size()) && (tablero[i][j] == tablero[i + 1][j])) return true; if ((j>0) && (tablero[i][j] == tablero[i][j - 1])) return true; if (((j + 1)<tablero[0].size()) && (tablero[i][j] == tablero[i][j + 1])) return true; } } return false; } __global__ void ToyBlast(int *tablero, int filas, int columnas, int fila, int columna, int *puntuacion) { int hilo_fila = blockIdx.x*blockDim.x + threadIdx.x; int hilo_columna = blockIdx.y*blockDim.y + threadIdx.y; if (hilo_fila == fila && hilo_columna == columna) { int cont = 0; int valor = tablero[fila*columnas + columna]; eliminarPieza(tablero, filas, columnas, fila, columna, valor, &cont); if (cont < 2) { tablero[fila*columnas + columna] = valor; } else { if (cont > 4) { //crearBomba switch (cont) { case 5: int aleatorio = clock() % 10; if (aleatorio < 5) { tablero[fila*columnas + columna] = 7; //SE CREA ALEATORIAMENTE UN ROTOR HORIZONTAL O VERTICAL } else { tablero[fila*columnas + columna] = 8; } break; case 6: tablero[fila*columnas + columna] = 9; break; default: tablero[fila*columnas + columna] = 10 + valor; //PARA ALMACENAR EL BLOQUE A EXPLOTAR DEL PUZZLE, LA BOMBA SERA DE 11 A 16 EN FUNCIÓN DEL COLOR break; } } } if (cont>1) *puntuacion = *puntuacion + cont; } __syncthreads(); //SUBIR TODOS LOS CEROS if (hilo_columna < columnas&&hilo_fila < filas) { for (int i = 1; i < filas; i++) { if (tablero[(filas - i)*columnas + hilo_columna] == 0) { if (tablero[(filas - (i + 1))*columnas + hilo_columna] == 0) { int j = i; while (tablero[(filas - (j + 1))*columnas + hilo_columna] == 0 && j < filas) { j++; } tablero[(filas - i)*columnas + hilo_columna] = tablero[(filas - (j + 1))*columnas + hilo_columna]; tablero[(filas - (j + 1))*columnas + hilo_columna] = 0; } else { tablero[(filas - i)*columnas + hilo_columna] = tablero[(filas - (i + 1))*columnas + hilo_columna]; tablero[(filas - (i + 1))*columnas + hilo_columna] = 0; } } __syncthreads(); } } } __device__ void eliminarPieza(int *tablero, int filas, int columnas, int fila, int columna, int valor_ini, int *cont) { //DECLARAMOS BOOLEANOS PARA SABER HACIA DONDE TIENE QUE COMPROBAR SI TIENE QUE ELIMINAR LA PIEZA, SI TIENE QUE COMPROBARLA VOLVEMOS A LLAMAR A eliminarPieza int valor_act = tablero[fila*columnas + columna]; if ((valor_act == valor_ini) && (valor_act<7)) { tablero[fila*columnas + columna] = 0; *cont = *cont + 1; bool arriba = true; bool abajo = true; bool izquierda = 
true; bool derecha = true; if (fila < 1) arriba = false; if (columna < 1) izquierda = false; if (fila + 1 >= filas) abajo = false; if (columna + 1 >= columnas) derecha = false; if (arriba) eliminarPieza(tablero, filas, columnas, fila - 1, columna, valor_ini, cont); if (izquierda) eliminarPieza(tablero, filas, columnas, fila, columna - 1, valor_ini, cont); if (abajo) eliminarPieza(tablero, filas, columnas, fila + 1, columna, valor_ini, cont); if (derecha) eliminarPieza(tablero, filas, columnas, fila, columna + 1, valor_ini, cont); } else { //SI NO COINICIDE EL VALOR, HAY QUE COMPROBAR SI ES UNA BOMBA, PERO SÓLO HAY QUE EXPLOTARLA SI ES LA PRIMERA PIEZA ELIMINADA if ((*cont == 0) && (valor_act>6)) { //ES UNA BOMBA Y HAY QUE EXPLOTARLA *cont = 2; switch (valor_act) { case 7: //BOMBA 5 PIEZAS HORIZONTAL bombaRotorH(tablero, filas, columnas, fila, columna); break; case 8: //BOMBA 5 PIEZAS VERTICAL bombaRotorV(tablero, filas, columnas, fila, columna); break; case 9: //BOMBA 6 PIEZAS bombaTNT(tablero, filas, columnas, fila, columna); break; default://BOMBA 7 PIEZAS bombaPuzzle(tablero, filas, columnas, fila, columna); break; } } else { //ES UNA BOMBA PERO NO HAY QUE EXPLOTARLA, ES ADYACENTE A LAS QUE HAY QUE EXPLOTAR } } } __device__ void bombaRotorH(int* tablero, int filas, int columnas, int fila, int columna) { //BORRAR FILA for (int i = 0; i < columnas; i++) { tablero[fila*columnas + i] = 0; } } __device__ void bombaRotorV(int* tablero, int filas, int columnas, int fila, int columna) { //BORRAR COLUMNA for (int i = 0; i < filas; i++) { tablero[i*columnas + columna] = 0; } } __device__ void bombaTNT(int* tablero, int filas, int columnas, int fila, int columna) { tablero[fila*columnas + columna] = 0; bool arriba = true; bool abajo = true; bool izquierda = true; bool derecha = true; if (fila < 1) arriba = false; if (columna < 1) izquierda = false; if (fila + 1 >= filas) abajo = false; if (columna + 1 >= columnas) derecha = false; if (arriba) tablero[(fila - 1)*columnas + columna] = 0; if (izquierda) tablero[fila*columnas + (columna - 1)] = 0; if (abajo) tablero[(fila + 1)*columnas + columna] = 0; if (derecha) tablero[fila*columnas + (columna + 1)] = 0; if (arriba&&izquierda) tablero[(fila - 1)*columnas + (columna - 1)] = 0; if (arriba&&derecha) tablero[(fila - 1)*columnas + (columna + 1)] = 0; if (abajo&&izquierda) tablero[(fila + 1)*columnas + (columna - 1)] = 0; if (abajo&&derecha) tablero[(fila + 1)*columnas + (columna + 1)] = 0; } __device__ void bombaPuzzle(int* tablero, int filas, int columnas, int fila, int columna) { int valor = tablero[fila*columnas + columna] - 10; tablero[fila*columnas + columna] = 0; for (int i = 0; i < filas; i++) { for (int j = 0; j < columnas; j++) { if (tablero[i*columnas + j] == valor) tablero[i*columnas + j] = 0; } } }
22a5bd3c4bc384e53a4b4c4a48ab4d31c2f1c2e9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "coordfunctions.cuh" #include "constants.cuh" #include "conversions.cuh" #include "helper_math.h" extern texture <float4, hipTextureType3D, hipReadModeElementType> dataTex; /* Uses parallel computing to determine the origin (middle) of each of the field lines computed with RK4 and stored in lineoutput earlier. Warning: only works when the total number of threads used to call this function is a multiple of numberoflines, and their ratio is a divisor of the blocksize (so each RK4 line will be processed within a single block) */ __device__ int signdiff(float a, float b) { return (a < 0 && b >= 0) || (a>0 && b <=0); } //find the number of x=0 transitions in g_linedata, storing the result in g_sumdata. Only works for powers of 2 datasets and needs a minimum of sdata of 64*sizeof(float) (!) __global__ void reducePC(float4* g_linedata, int* g_PCdata) { extern __shared__ int idata[]; //load data from global data&texture to shared mem and perform cross product unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; idata[tid] = signdiff(g_linedata[i].x,g_linedata[i+1].x); __syncthreads(); //do the reductions for( unsigned int s=blockDim.x/2; s>32; s>>=1) {//32 = warpsize if(tid < s) { idata[tid] += idata[tid+s]; } __syncthreads(); } if(tid<32) {// Warp's zijn SIMD gesynchroniseerd idata[tid] += idata[tid + 32]; idata[tid] += idata[tid + 16]; idata[tid] += idata[tid + 8]; idata[tid] += idata[tid + 4]; idata[tid] += idata[tid + 2]; idata[tid] += idata[tid + 1]; } //write result to global if(tid == 0) g_PCdata[blockIdx.x] = idata[0]; } __global__ void normal(float4* g_linedata, float4* g_normaldata, float4* g_origin) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; float4 locCord = g_linedata[i]; float4 shiftedCord = ShiftCoord(locCord, *g_origin); g_normaldata[i] = make_float4(cross( make_float3(shiftedCord), make_float3(tex3D(dataTex, Smiet2Tex(locCord))) )); } /* Warning: absolutely useless! Mathematics is not correct, does not give normal to plane of torus!! 
DO NOT USE Give a third parameter to your kernellaunch for the size of sdata __global__ void reduceNormal(float4* g_linedata, float4* g_normaldata) {//equivalent to doing the texture-fetch and cross product and applying reducesum extern __shared__ float4 sdata[]; //load data from global data&texture to shared mem and perform cross product unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = make_float4(cross(make_float3(g_linedata[i]), make_float3(tex3D(dataTex, Smiet2Tex(g_linedata[i]))))); __syncthreads(); //do the reductions for( unsigned int s=blockDim.x/2; s>32; s>>=1) {//32 = warpsize if(tid < s) { sdata[tid] += sdata[tid+s]; } __syncthreads(); } if(tid<32) {// Warp's zijn SIMD gesynchroniseerd sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } //write result to global if(tid == 0) g_normaldata[blockIdx.x] = sdata[0]; }*/ __global__ void winding(float4* g_linedata, float* g_alpha, float* g_beta, float4* d_origin, float* g_r, float4* d_normal, unsigned int steps) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int modifier = min(i%steps,1); float4 locCord = Cart2Tor(ShiftCoord(g_linedata[i], *d_origin), *d_normal, *g_r); locCord -= Cart2Tor(ShiftCoord(g_linedata[i-modifier], *d_origin), *d_normal, *g_r); //lelijk en langzaam, maar mijn bit-wise magic is niet genoeg om dit netjes te doen if(locCord.y > PI/2.0) { locCord.y -= PI; } else if (locCord.y < -0.5*PI) { locCord.y += PI; } if(locCord.z > PI/2.0) { locCord.z -= PI; } else if (locCord.z < -0.5*PI) { locCord.z += PI; } g_alpha[i] = locCord.y; g_beta[i] = locCord.z; } __global__ void divide(float* enumerator, float* denominator, float* output) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; output[i] = enumerator[i]/denominator[i]; } __global__ void divide(float4* enumerator, float denominator, float4* output) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; output[i] = enumerator[i]/denominator; }
22a5bd3c4bc384e53a4b4c4a48ab4d31c2f1c2e9.cu
#include "coordfunctions.cuh" #include "constants.cuh" #include "conversions.cuh" #include "helper_math.h" extern texture <float4, cudaTextureType3D, cudaReadModeElementType> dataTex; /* Uses parallel computing to determine the origin (middle) of each of the field lines computed with RK4 and stored in lineoutput earlier. Warning: only works when the total number of threads used to call this function is a multiple of numberoflines, and their ratio is a divisor of the blocksize (so each RK4 line will be processed within a single block) */ __device__ int signdiff(float a, float b) { return (a < 0 && b >= 0) || (a>0 && b <=0); } //find the number of x=0 transitions in g_linedata, storing the result in g_sumdata. Only works for powers of 2 datasets and needs a minimum of sdata of 64*sizeof(float) (!) __global__ void reducePC(float4* g_linedata, int* g_PCdata) { extern __shared__ int idata[]; //load data from global data&texture to shared mem and perform cross product unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; idata[tid] = signdiff(g_linedata[i].x,g_linedata[i+1].x); __syncthreads(); //do the reductions for( unsigned int s=blockDim.x/2; s>32; s>>=1) {//32 = warpsize if(tid < s) { idata[tid] += idata[tid+s]; } __syncthreads(); } if(tid<32) {// Warp's zijn SIMD gesynchroniseerd idata[tid] += idata[tid + 32]; idata[tid] += idata[tid + 16]; idata[tid] += idata[tid + 8]; idata[tid] += idata[tid + 4]; idata[tid] += idata[tid + 2]; idata[tid] += idata[tid + 1]; } //write result to global if(tid == 0) g_PCdata[blockIdx.x] = idata[0]; } __global__ void normal(float4* g_linedata, float4* g_normaldata, float4* g_origin) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; float4 locCord = g_linedata[i]; float4 shiftedCord = ShiftCoord(locCord, *g_origin); g_normaldata[i] = make_float4(cross( make_float3(shiftedCord), make_float3(tex3D(dataTex, Smiet2Tex(locCord))) )); } /* Warning: absolutely useless! Mathematics is not correct, does not give normal to plane of torus!! 
DO NOT USE Give a third parameter to your kernellaunch for the size of sdata __global__ void reduceNormal(float4* g_linedata, float4* g_normaldata) {//equivalent to doing the texture-fetch and cross product and applying reducesum extern __shared__ float4 sdata[]; //load data from global data&texture to shared mem and perform cross product unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = make_float4(cross(make_float3(g_linedata[i]), make_float3(tex3D(dataTex, Smiet2Tex(g_linedata[i]))))); __syncthreads(); //do the reductions for( unsigned int s=blockDim.x/2; s>32; s>>=1) {//32 = warpsize if(tid < s) { sdata[tid] += sdata[tid+s]; } __syncthreads(); } if(tid<32) {// Warp's zijn SIMD gesynchroniseerd sdata[tid] += sdata[tid + 32]; sdata[tid] += sdata[tid + 16]; sdata[tid] += sdata[tid + 8]; sdata[tid] += sdata[tid + 4]; sdata[tid] += sdata[tid + 2]; sdata[tid] += sdata[tid + 1]; } //write result to global if(tid == 0) g_normaldata[blockIdx.x] = sdata[0]; }*/ __global__ void winding(float4* g_linedata, float* g_alpha, float* g_beta, float4* d_origin, float* g_r, float4* d_normal, unsigned int steps) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; unsigned int modifier = min(i%steps,1); float4 locCord = Cart2Tor(ShiftCoord(g_linedata[i], *d_origin), *d_normal, *g_r); locCord -= Cart2Tor(ShiftCoord(g_linedata[i-modifier], *d_origin), *d_normal, *g_r); //lelijk en langzaam, maar mijn bit-wise magic is niet genoeg om dit netjes te doen if(locCord.y > PI/2.0) { locCord.y -= PI; } else if (locCord.y < -0.5*PI) { locCord.y += PI; } if(locCord.z > PI/2.0) { locCord.z -= PI; } else if (locCord.z < -0.5*PI) { locCord.z += PI; } g_alpha[i] = locCord.y; g_beta[i] = locCord.z; } __global__ void divide(float* enumerator, float* denominator, float* output) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; output[i] = enumerator[i]/denominator[i]; } __global__ void divide(float4* enumerator, float denominator, float4* output) { unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; output[i] = enumerator[i]/denominator; }
d797c3ed16accb1a07b4b4c7f7c91a79849e97a9.hip
// !!! This is a file automatically generated by hipify!!! #include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { //////// angle, toMarking, dist, fast, etc., (14 output) ///////////////////////////////// by chenyi template <typename Dtype> void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { int count = bottom[0]->count(); int num = bottom[0]->num(); // batch size int dim = count/num; // equals to number of outputs in last layer Dtype y_array[count]; Dtype label[num*14]; Dtype bottom_data[count]; Dtype diff[count]; Dtype* y_array_cuda; hipMalloc((void**)&y_array_cuda,sizeof(Dtype)*count); const Dtype* bottom_data_cuda = bottom[0]->gpu_data(); const Dtype* label_cuda = bottom[1]->gpu_data(); hipMemcpy(bottom_data,bottom_data_cuda,sizeof(Dtype)*count,hipMemcpyDeviceToHost); hipMemcpy(label,label_cuda,sizeof(Dtype)*num*14,hipMemcpyDeviceToHost); for (int i = 0; i < num; ++i) { y_array[i * dim] = label[i*14+0]/1.1+0.5; // angle range ~ [-0.5, 0.5] if (y_array[i * dim]>1.0) y_array[i * dim]=1.0; if (y_array[i * dim]<0.0) y_array[i * dim]=0.0; y_array[i * dim + 1] = label[i*14+1]*0.17778+1.34445; // toMarking_L range ~ [-7, -2.5] y_array[i * dim + 2] = label[i*14+2]*0.14545+0.39091; // toMarking_M range ~ [-2, 3.5] y_array[i * dim + 3] = label[i*14+3]*0.17778-0.34445; // toMarking_R range ~ [2.5, 7] y_array[i * dim + 4] = label[i*14+4]/95.0+0.12; // dist_L range ~ [0, 75] y_array[i * dim + 5] = label[i*14+5]/95.0+0.12; // dist_R range ~ [0, 75] y_array[i * dim + 6] = label[i*14+6]*0.14545+1.48181; // toMarking_LL range ~ [-9.5, -4] y_array[i * dim + 7] = label[i*14+7]*0.16+0.98; // toMarking_ML range ~ [-5.5, -0.5] y_array[i * dim + 8] = label[i*14+8]*0.16+0.02; // toMarking_MR range ~ [0.5, 5.5] y_array[i * dim + 9] = label[i*14+9]*0.14545-0.48181; // toMarking_RR range ~ [4, 9.5] y_array[i * dim + 10] = label[i*14+10]/95.0+0.12; // dist_LL range ~ [0, 75] y_array[i * dim + 11] = label[i*14+11]/95.0+0.12; // dist_MM range ~ [0, 75] y_array[i * dim + 12] = label[i*14+12]/95.0+0.12; // dist_RR range ~ [0, 75] y_array[i * dim + 13] = label[i*14+13]*0.6+0.2; // fast range ~ {0, 1} } hipMemcpy(y_array_cuda,y_array,sizeof(Dtype)*count,hipMemcpyHostToDevice); caffe_gpu_sub(count, bottom_data_cuda, y_array_cuda, diff_.mutable_gpu_data()); Dtype dot; caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); Dtype loss = dot / num / Dtype(2); (*top)[0]->mutable_cpu_data()[0] = loss; hipMemcpy(diff,diff_.gpu_data(),sizeof(Dtype)*count,hipMemcpyDeviceToHost); hipFree(y_array_cuda); //for (int i = 0; i < num; ++i) { int i=25; for (int j = 0; j < dim; ++j) { printf("num: %d, dim: %d, out: %f, y_array: %f, diff: %f \n", i, j, bottom_data[i*dim+j], y_array[i*dim+j], diff[i*dim+j]); fflush(stdout); } //} } ///////////////////////////////// by chenyi template <typename Dtype> void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num(); caffe_gpu_axpby( (*bottom)[i]->count(), // count alpha, // alpha diff_.gpu_data(), // a Dtype(0), // beta (*bottom)[i]->mutable_gpu_diff()); // b } } } INSTANTIATE_CLASS(EuclideanLossLayer); } // namespace caffe
d797c3ed16accb1a07b4b4c7f7c91a79849e97a9.cu
#include <vector> #include "caffe/layer.hpp" #include "caffe/util/io.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { //////// angle, toMarking, dist, fast, etc., (14 output) ///////////////////////////////// by chenyi template <typename Dtype> void EuclideanLossLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, vector<Blob<Dtype>*>* top) { int count = bottom[0]->count(); int num = bottom[0]->num(); // batch size int dim = count/num; // equals to number of outputs in last layer Dtype y_array[count]; Dtype label[num*14]; Dtype bottom_data[count]; Dtype diff[count]; Dtype* y_array_cuda; cudaMalloc((void**)&y_array_cuda,sizeof(Dtype)*count); const Dtype* bottom_data_cuda = bottom[0]->gpu_data(); const Dtype* label_cuda = bottom[1]->gpu_data(); cudaMemcpy(bottom_data,bottom_data_cuda,sizeof(Dtype)*count,cudaMemcpyDeviceToHost); cudaMemcpy(label,label_cuda,sizeof(Dtype)*num*14,cudaMemcpyDeviceToHost); for (int i = 0; i < num; ++i) { y_array[i * dim] = label[i*14+0]/1.1+0.5; // angle range ~ [-0.5, 0.5] if (y_array[i * dim]>1.0) y_array[i * dim]=1.0; if (y_array[i * dim]<0.0) y_array[i * dim]=0.0; y_array[i * dim + 1] = label[i*14+1]*0.17778+1.34445; // toMarking_L range ~ [-7, -2.5] y_array[i * dim + 2] = label[i*14+2]*0.14545+0.39091; // toMarking_M range ~ [-2, 3.5] y_array[i * dim + 3] = label[i*14+3]*0.17778-0.34445; // toMarking_R range ~ [2.5, 7] y_array[i * dim + 4] = label[i*14+4]/95.0+0.12; // dist_L range ~ [0, 75] y_array[i * dim + 5] = label[i*14+5]/95.0+0.12; // dist_R range ~ [0, 75] y_array[i * dim + 6] = label[i*14+6]*0.14545+1.48181; // toMarking_LL range ~ [-9.5, -4] y_array[i * dim + 7] = label[i*14+7]*0.16+0.98; // toMarking_ML range ~ [-5.5, -0.5] y_array[i * dim + 8] = label[i*14+8]*0.16+0.02; // toMarking_MR range ~ [0.5, 5.5] y_array[i * dim + 9] = label[i*14+9]*0.14545-0.48181; // toMarking_RR range ~ [4, 9.5] y_array[i * dim + 10] = label[i*14+10]/95.0+0.12; // dist_LL range ~ [0, 75] y_array[i * dim + 11] = label[i*14+11]/95.0+0.12; // dist_MM range ~ [0, 75] y_array[i * dim + 12] = label[i*14+12]/95.0+0.12; // dist_RR range ~ [0, 75] y_array[i * dim + 13] = label[i*14+13]*0.6+0.2; // fast range ~ {0, 1} } cudaMemcpy(y_array_cuda,y_array,sizeof(Dtype)*count,cudaMemcpyHostToDevice); caffe_gpu_sub(count, bottom_data_cuda, y_array_cuda, diff_.mutable_gpu_data()); Dtype dot; caffe_gpu_dot(count, diff_.gpu_data(), diff_.gpu_data(), &dot); Dtype loss = dot / num / Dtype(2); (*top)[0]->mutable_cpu_data()[0] = loss; cudaMemcpy(diff,diff_.gpu_data(),sizeof(Dtype)*count,cudaMemcpyDeviceToHost); cudaFree(y_array_cuda); //for (int i = 0; i < num; ++i) { int i=25; for (int j = 0; j < dim; ++j) { printf("num: %d, dim: %d, out: %f, y_array: %f, diff: %f \n", i, j, bottom_data[i*dim+j], y_array[i*dim+j], diff[i*dim+j]); fflush(stdout); } //} } ///////////////////////////////// by chenyi template <typename Dtype> void EuclideanLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, vector<Blob<Dtype>*>* bottom) { for (int i = 0; i < 2; ++i) { if (propagate_down[i]) { const Dtype sign = (i == 0) ? 1 : -1; const Dtype alpha = sign * top[0]->cpu_diff()[0] / (*bottom)[i]->num(); caffe_gpu_axpby( (*bottom)[i]->count(), // count alpha, // alpha diff_.gpu_data(), // a Dtype(0), // beta (*bottom)[i]->mutable_gpu_diff()); // b } } } INSTANTIATE_CLASS(EuclideanLossLayer); } // namespace caffe
5bd70218f71648f4fe8d6785710ba48a8a89f1ef.hip
// !!! This is a file automatically generated by hipify!!! /* check-thread-index.cu */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <hip/hip_runtime.h> #define CHECK_CUDA_CALL(call) \ { \ const hipError_t error = call; \ \ if (error != hipSuccess) { \ fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \ __FILE__, __LINE__, \ error, hipGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ } \ void printMatrix(int* matC, int row, int col) { int i; int j; int* pC = matC; printf("Matrix (%d, %d)\n", row, col); for (i = 0; i < row; ++i) { for (j = 0; j < col; ++j) printf("%2d ", pC[j]); printf("\n"); pC += col; } printf("\n"); return; } __global__ void printThreadIndex(int* matA, int row, int col) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int i = y * col + x; printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), " "coordinate: (%d, %d), array index: %d, " "matrix value: %d\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, x, y, i, matA[i]); } int main(int argc, char** argv) { int i; int dev; hipDeviceProp_t deviceProp; int matRow; int matCol; int numOfElements; int numOfBytes; int* hostMatA; int* devMatA; /* Setup device */ dev = 0; CHECK_CUDA_CALL(hipGetDeviceProperties(&deviceProp, dev)); printf("Using device %d: %s\n", dev, deviceProp.name); CHECK_CUDA_CALL(hipSetDevice(dev)); /* Set matrix size */ matCol = 8; matRow = 6; numOfElements = matRow * matCol; numOfBytes = numOfElements * sizeof(int); /* Allocate host memory */ hostMatA = (int*)calloc(numOfElements, sizeof(int)); for (i = 0; i < numOfElements; ++i) hostMatA[i] = i; printMatrix(hostMatA, matRow, matCol); /* Allocate device memory */ CHECK_CUDA_CALL(hipMalloc((void**)&devMatA, numOfBytes)); /* Set execution configuration */ dim3 block(4, 2); dim3 grid((matCol + block.x - 1) / block.x, (matRow + block.y - 1) / block.y); /* Transfer matrix data from host */ CHECK_CUDA_CALL(hipMemcpy(devMatA, hostMatA, numOfBytes, hipMemcpyHostToDevice)); /* Call kernel from host */ hipLaunchKernelGGL(( printThreadIndex), dim3(grid), dim3(block), 0, 0, devMatA, matRow, matCol); CHECK_CUDA_CALL(hipDeviceSynchronize()); /* Free device and host memory */ CHECK_CUDA_CALL(hipFree(devMatA)); free(hostMatA); /* Reset device */ CHECK_CUDA_CALL(hipDeviceReset()); return EXIT_SUCCESS; }
5bd70218f71648f4fe8d6785710ba48a8a89f1ef.cu
/* check-thread-index.cu */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> #define CHECK_CUDA_CALL(call) \ { \ const cudaError_t error = call; \ \ if (error != cudaSuccess) { \ fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \ __FILE__, __LINE__, \ error, cudaGetErrorString(error)); \ exit(EXIT_FAILURE); \ } \ } \ void printMatrix(int* matC, int row, int col) { int i; int j; int* pC = matC; printf("Matrix (%d, %d)\n", row, col); for (i = 0; i < row; ++i) { for (j = 0; j < col; ++j) printf("%2d ", pC[j]); printf("\n"); pC += col; } printf("\n"); return; } __global__ void printThreadIndex(int* matA, int row, int col) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; unsigned int i = y * col + x; printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), " "coordinate: (%d, %d), array index: %d, " "matrix value: %d\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, x, y, i, matA[i]); } int main(int argc, char** argv) { int i; int dev; cudaDeviceProp deviceProp; int matRow; int matCol; int numOfElements; int numOfBytes; int* hostMatA; int* devMatA; /* Setup device */ dev = 0; CHECK_CUDA_CALL(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using device %d: %s\n", dev, deviceProp.name); CHECK_CUDA_CALL(cudaSetDevice(dev)); /* Set matrix size */ matCol = 8; matRow = 6; numOfElements = matRow * matCol; numOfBytes = numOfElements * sizeof(int); /* Allocate host memory */ hostMatA = (int*)calloc(numOfElements, sizeof(int)); for (i = 0; i < numOfElements; ++i) hostMatA[i] = i; printMatrix(hostMatA, matRow, matCol); /* Allocate device memory */ CHECK_CUDA_CALL(cudaMalloc((void**)&devMatA, numOfBytes)); /* Set execution configuration */ dim3 block(4, 2); dim3 grid((matCol + block.x - 1) / block.x, (matRow + block.y - 1) / block.y); /* Transfer matrix data from host */ CHECK_CUDA_CALL(cudaMemcpy(devMatA, hostMatA, numOfBytes, cudaMemcpyHostToDevice)); /* Call kernel from host */ printThreadIndex<<<grid, block>>>(devMatA, matRow, matCol); CHECK_CUDA_CALL(cudaDeviceSynchronize()); /* Free device and host memory */ CHECK_CUDA_CALL(cudaFree(devMatA)); free(hostMatA); /* Reset device */ CHECK_CUDA_CALL(cudaDeviceReset()); return EXIT_SUCCESS; }
3b04ae9289cbbd4594789993d4e331d6402c7c8d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> __global__ void kernel1(int* d_data) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; d_data[tid] += 1; } __global__ void kernel2(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; const int numElementPerThread = numElement/nthread; const int start = tid*numElementPerThread; int end = start + numElementPerThread; for(int i = start; i < end; i++) { d_data[i] += 1; } } // __global__ void kernel2_opt(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; for(int i = tid; i < numElement; i += nthread) { d_data[i] += 1; } } void demo1() { const int numElement = 512*1024; int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i] + 1; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel1), dim3(1024), dim3(512), 0, 0, d_data); hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime, start, stop); printf("Kernel elapsed time: %.3f ms\n", elapsedTime); printf("kernel1: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED.\n"); return; } } printf("Test pass...\n"); free(h_data); free(gold); } void demo2(const int numElement) { printf("numElement = %d\n", numElement); int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i]; } int* d_data; hipMalloc(&d_data, sizeof(int)*numElement); hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); float elapsedTime = 0.0f; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); /*CPU*/ elapsedTime = 0.0f; hipEventRecord(start, 0); for(int i = 0; i < numElement; i++) { gold[i] += 1; } hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("CPU elapsed time: %.3f ms\n", elapsedTime); /*GPU method 1*/ elapsedTime = 0.0f; hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel2), dim3(1024), dim3(512), 0, 0, d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2 elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); /*GPU method 2*/ hipMemcpy(d_data, h_data, sizeof(int)*numElement, hipMemcpyHostToDevice); hipEventRecord(start, 0); hipLaunchKernelGGL(( kernel2_opt), dim3(1024), dim3(512), 0, 0, d_data, numElement); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", hipGetErrorString(hipGetLastError())); hipMemcpy(h_data, d_data, sizeof(int)*numElement, hipMemcpyDeviceToHost); hipFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, 
TEST FAILED. i = %d: %d, %d\n", i, h_data[i], gold[i]); return; } } printf("Test pass...\n"); free(h_data); free(gold); } int main() { int numElement = 1*1024*1024; demo2(numElement); //execute once to warm up for performance measurement printf("\n\nstart ............................................\n"); printf("demo2 started!\n"); for(int i = numElement; i <= 32*1024*1024; i*=2) { demo2(i); printf("\n"); } printf("demo1 started!\n"); demo1(); return EXIT_SUCCESS; }
3b04ae9289cbbd4594789993d4e331d6402c7c8d.cu
#include <stdio.h> #include <stdlib.h> __global__ void kernel1(int* d_data) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; d_data[tid] += 1; } __global__ void kernel2(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; const int numElementPerThread = numElement/nthread; const int start = tid*numElementPerThread; int end = start + numElementPerThread; for(int i = start; i < end; i++) { d_data[i] += 1; } } // __global__ void kernel2_opt(int* d_data, const int numElement) { const int tid = blockDim.x*blockIdx.x + threadIdx.x; const int nthread = blockDim.x*gridDim.x; for(int i = tid; i < numElement; i += nthread) { d_data[i] += 1; } } void demo1() { const int numElement = 512*1024; int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i] + 1; } int* d_data; cudaMalloc(&d_data, sizeof(int)*numElement); cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); kernel1<<<1024, 512>>>(d_data); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); printf("Kernel elapsed time: %.3f ms\n", elapsedTime); printf("kernel1: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy(h_data, d_data, sizeof(int)*numElement, cudaMemcpyDeviceToHost); cudaFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED.\n"); return; } } printf("Test pass...\n"); free(h_data); free(gold); } void demo2(const int numElement) { printf("numElement = %d\n", numElement); int* h_data = (int*)malloc(sizeof(int)*numElement); int* gold = (int*)malloc(sizeof(int)*numElement); for(int i = 0; i < numElement; i++) { h_data[i] = rand(); gold[i] = h_data[i]; } int* d_data; cudaMalloc(&d_data, sizeof(int)*numElement); cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice); float elapsedTime = 0.0f; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); /*CPU*/ elapsedTime = 0.0f; cudaEventRecord(start, 0); for(int i = 0; i < numElement; i++) { gold[i] += 1; } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("CPU elapsed time: %.3f ms\n", elapsedTime); /*GPU method 1*/ elapsedTime = 0.0f; cudaEventRecord(start, 0); kernel2<<<1024, 512>>>(d_data, numElement); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("kernel2 elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError())); /*GPU method 2*/ cudaMemcpy(d_data, h_data, sizeof(int)*numElement, cudaMemcpyHostToDevice); cudaEventRecord(start, 0); kernel2_opt<<<1024, 512>>>(d_data, numElement); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("kernel2_opt elapsed time: %.3f ms\n", elapsedTime); printf("kernel2: %s\n", cudaGetErrorString(cudaGetLastError())); cudaMemcpy(h_data, d_data, sizeof(int)*numElement, cudaMemcpyDeviceToHost); cudaFree(d_data); for(int i = 0; i < numElement; i++) { if(h_data[i] != gold[i]) { printf("!!!ERROR, TEST FAILED. 
i = %d: %d, %d\n", i, h_data[i], gold[i]); return; } } printf("Test pass...\n"); free(h_data); free(gold); } int main() { int numElement = 1*1024*1024; demo2(numElement); //execute once to warm up for performance measurement printf("\n\nstart ............................................\n"); printf("demo2 started!\n"); for(int i = numElement; i <= 32*1024*1024; i*=2) { demo2(i); printf("\n"); } printf("demo1 started!\n"); demo1(); return EXIT_SUCCESS; }
6c36ae4398ffe9cf6d6dc7d79e932a4e40643088.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // generated by Dendro-GR SymPyGR code gernation framework //date: 2018-10-14 00:09:24 #include "rhs_bssn.cuh" namespace cuda { /**@brief compute RHS @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __global__ void __computeBSSNRHS(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* __dendroBlkList, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams ,const hipDeviceProp_t* __deviceProperties, unsigned int stream_id){ // shared memory allocation for deriv and rhs computation __shared__ double __sm_base[5248]; __shared__ bool beta0_bool[1728]; __shared__ bool beta1_bool[1728]; __shared__ bool beta2_bool[1728]; for(unsigned int blk=__gpuBlockMap[2*blockIdx.x];blk<__gpuBlockMap[2*blockIdx.x+1];++blk){ // blocks assigned to each gpu block const _Block * dblock=&__dendroBlkList[blk]; // compute the derivatives __compute_derivatives(__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,beta0_bool,beta1_bool,beta2_bool,stream_id); __syncthreads(); // compute the RHS __compute_a_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_b_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_gt_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_chi_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_At_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_K_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_Gt_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_B_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __ko_dissipation(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); }// end of the block loop } // end of kernel /**@brief compute derivs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties */ __device__ void __compute_derivatives(const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, bool* beta0_bool, bool* beta1_bool, bool* beta2_bool, unsigned int stream_id){ const unsigned int 
NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={12,12,12}; double * unzipVarInShared = __sm_base + 0; double * unzipVarOutShared0 = __sm_base + 1728; double * unzipVarOutShared1 = __sm_base + 3456; const unsigned int Lb = 0;// load begin bound const unsigned int Le = sz[0]-0;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*3)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(0,(int)(0 + tile_sz[2]*iter_z -2*iter_z*3)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-0-1); if((ijk_lm[5]-ijk_lm[4]+1)<=9) ijk_lm[4]=ijk_lm[4]-(9-(ijk_lm[5]-ijk_lm[4]+1)) ; for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(0,(int)(0 + tile_sz[1]*iter_y -2*iter_y*3)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-0-1); if((ijk_lm[3]-ijk_lm[2]+1)<=9) ijk_lm[2]=ijk_lm[2]-(9-(ijk_lm[3]-ijk_lm[2]+1)) ; for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(0,(int)(0 + tile_sz[0]*iter_x -2*iter_x*3)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-0-1); if((ijk_lm[1]-ijk_lm[0]+1)<=9) ijk_lm[0]=ijk_lm[0]-(9-(ijk_lm[1]-ijk_lm[0]+1)) ; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) unzipVarOutShared0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) unzipVarOutShared1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); cuda::__extractSign3D<double>((double *)unzipVarInShared,(bool *) beta0_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__extractSign3D<double>((double *)unzipVarOutShared0,(bool *) beta1_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__extractSign3D<double>((double *)unzipVarOutShared1,(bool *) beta2_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const 
unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable alpha _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable alpha _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable alpha _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
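/*
 * [Added note, not part of the generated code]
 * The tile_lm ranges computed before each __storeSharedToGlobal3D call in this function select
 * the portion of the 12^3 shared-memory tile that is written back to the global derivative
 * workspace; broadly, halo points shared with a neighbouring tile are skipped, and only the
 * first and last tiles of a sweep (iter==0 or iter==BLK_ITERATIONS-1) write the points on the
 * physical block boundary.
 */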
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta0 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta1 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta2 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 
3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
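        // bflag (presumably the block's physical-boundary flag) and the padding width 3 are forwarded
        // unchanged to every stencil call, so any boundary-specific closure is handled inside the
        // _RSWS_* routines rather than in this staging code.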
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
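        // B2 receives the same treatment as B0 and B1; only the destination buffers change
        // (__grad_*_B2, __kograd_*_B2, __agrad_*_B2).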
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
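        // beta0_bool / beta1_bool / beta2_bool passed to the _RSWS_deriv42adv_* calls appear to be
        // per-point direction flags (prepared earlier in the kernel) selecting the upwind side of the
        // advective stencil for each shift component.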
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable chi _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable chi _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
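        // chi also needs second derivatives: the x-derivative kept in unzipVarOutShared0 is
        // differentiated again in y and z through unzipVarOutShared1 to give the mixed terms
        // (grad2_0_1, grad2_0_2); grad2_1_2 is built the same way from the y-derivative further down.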
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable chi _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
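        // The _RSWS_deriv42_xx/_yy/_zz calls in this block produce the unmixed second derivatives of
        // chi (grad2_0_0, grad2_1_1, grad2_2_2) directly from the input tile, rather than by chaining
        // two first-derivative passes.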
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
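        // Gt0: only first derivatives, Kreiss-Oliger terms and advective derivatives are staged;
        // no second derivatives of the Gt fields appear in this part of the kernel.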
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
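        // Gt1 (and Gt2 after it) repeat the Gt0 staging verbatim, writing to their own
        // __grad_/__kograd_/__agrad_ workspace buffers.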
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable K _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable K _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable K _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
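        // K, a single-component field, gets the same grad / kograd / agrad staging as the fields above.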
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
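// (The tile_lm setup for the __agrad_0_K store continues below.) Before every
// __storeSharedToGlobal3D call, tile_lm[] selects the sub-tile that is written back: along the
// direction of the derivative just taken the first 3 padding points are always dropped, while
// along the other two directions the 3-point overlap appears to be skipped except on the first
// (low side) and last (high side) tile iterations, as chosen via iter_x/iter_y/iter_z and
// BLK_ITERATIONS_*.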
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt0 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
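// (The tile_lm setup for the __grad2_0_1_gt0 store continues below.) For gt0 and the gt
// components that follow, mixed second derivatives are built by chaining the first-derivative
// kernels: the x-derivative kept in unzipVarOutShared0 is differentiated again in y (and then z)
// into unzipVarOutShared1, and only afterwards is the x-derivative itself stored as __grad_0_gt0.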
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
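// (The tile_lm setup for the __grad_1_gt0 store continues below.) Every workspace destination is
// offset by stream_id*__szPerStream + SM_ID*__maxBlkSz, which appears to give each (stream, SM)
// pair its own private scratch region inside __derivWorkspace.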
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
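// (The tile_lm setup for the __agrad_0_gt0 store continues below.) The _RSWS_deriv42adv_* kernels
// take the beta0_bool/beta1_bool/beta2_bool masks, presumably the sign of the shift vector
// components, to select the upwind side for the advective derivatives written to the __agrad_* buffers.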
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt1 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
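// (The tile_lm setup for the __grad2_0_1_gt1 store continues below.) The block above finishes
// gt0; the same sequence of first derivatives, mixed and pure second derivatives, Kreiss-Oliger
// dissipation, and advective derivatives is now repeated for gt1 (U_SYMGT1).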
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
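// (The tile_lm setup for the __grad2_2_2_gt1 store continues below.) Unlike the mixed
// derivatives, the pure second derivatives (_RSWS_deriv42_xx/_yy/_zz) read the original input
// tile in unzipVarInShared rather than a first-derivative result.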
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt2 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
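// (The tile_lm setup for the __grad2_0_1_gt2 store continues below.) The same per-variable
// derivative sequence now runs for gt2 (U_SYMGT2).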
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt3 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt3 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
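// (The tile_lm setup for the __grad2_0_1_gt3 store continues below.) The same per-variable
// derivative sequence now runs for gt3 (U_SYMGT3).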
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt3 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt4 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt4 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
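// (The tile_lm setup for the __grad2_0_1_gt4 store continues below.) The same per-variable
// derivative sequence now runs for gt4 (U_SYMGT4).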
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt4 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt5 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt5 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
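// Note on indexing: every __storeSharedToGlobal3D call in this kernel writes its result
// at offset stream_id*__szPerStream + SM_ID*__maxBlkSz inside the named workspace array,
// which appears to give each CUDA stream its own slab of __derivWorkspace and each SM /
// thread block its own __maxBlkSz-sized slot, so concurrently resident blocks stage
// their derivatives without overwriting one another.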
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt5 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
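// The gt5 advective derivatives are completed below; the pipeline then moves on to the
// trace-free extrinsic-curvature components (At0, At1, ...). For these only the first,
// Kreiss-Oliger and advective derivatives are staged (there are no grad2_* entries),
// presumably because the right-hand side needs second derivatives only of the metric
// and gauge variables.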
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
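// At0 (and each following At component) uses only the single scratch buffer
// unzipVarOutShared0, since no mixed derivatives are chained: the staging order per
// component is grad_0/1/2, then kograd_0/1/2, then agrad_0/1/2.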
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
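// At2 and the remaining At components repeat the identical staging; only the target
// workspace arrays (__grad_*_At2, __kograd_*_At2, __agrad_*_At2, ...) change.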
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At3 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At3 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At3 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At4 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At4 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At4 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At5 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At5 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At5 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block tile loop x } // end of block tile loop y } // end of block tile loop z } // end of function __compute_derivatives /**@brief compute a_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_a_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for a_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double 
ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={8,8,8}; //input vars begin double * alpha = __sm_base + 0; double * K = __sm_base + 512; double * beta0 = __sm_base + 1024; double * beta1 = __sm_base + 1536; double * beta2 = __sm_base + 2048; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_alpha = __sm_base + 2560; double * agrad_2_alpha = __sm_base + 3072; double * agrad_0_alpha = __sm_base + 3584; // deriv vars end // output vars begin double * a_rhs = __sm_base + 4096; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
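// The remaining gauge input (beta2) and the pre-computed advective derivatives of the lapse
// (agrad_0/1/2_alpha, read back from this stream's / SM's slot of the derivative workspace)
// are loaded below. Each in-tile point then evaluates
//   a_rhs = -2*alpha*K + lambda[0]*(beta0*agrad_0_alpha + beta1*agrad_1_alpha + beta2*agrad_2_alpha),
// effectively an advected 1+log-type slicing condition for the lapse.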
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 12 // Dendro: printing temp variables // Dendro: printing variables a_rhs[pp] = -2*K[pp]*alpha[pp] + lambda[0]*(beta0[pp]*agrad_0_alpha[pp] + beta1[pp]*agrad_1_alpha[pp] + beta2[pp]*agrad_2_alpha[pp]); // Dendro: reduced ops: 12 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(a_rhs, &__unzipOutVar[cuda::VAR::U_ALPHA][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_a_rhs /**@brief compute b_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_b_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for b_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double 
lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={6,6,6}; //input vars begin double * beta1 = __sm_base + 0; double * beta2 = __sm_base + 216; double * alpha = __sm_base + 432; double * B2 = __sm_base + 648; double * beta0 = __sm_base + 864; double * B1 = __sm_base + 1080; double * B0 = __sm_base + 1296; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_beta1 = __sm_base + 1512; double * agrad_1_beta2 = __sm_base + 1728; double * agrad_0_beta2 = __sm_base + 1944; double * agrad_2_beta0 = __sm_base + 2160; double * agrad_2_beta2 = __sm_base + 2376; double * agrad_2_beta1 = __sm_base + 2592; double * agrad_0_beta1 = __sm_base + 2808; double * agrad_1_beta0 = __sm_base + 3024; double * agrad_0_beta0 = __sm_base + 3240; // deriv vars end // output vars begin double * b_rhs0 = __sm_base + 3456; double * b_rhs2 = __sm_base + 3672; double * b_rhs1 = __sm_base + 3888; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 51 // Dendro: printing temp variables const double DENDRO_0 = (3.0L/4.0L)*alpha[pp]*lambda_f[1] + (3.0L/4.0L)*lambda_f[0]; // Dendro: printing variables b_rhs0[pp] = B0[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta0[pp] + beta1[pp]*agrad_1_beta0[pp] + beta2[pp]*agrad_2_beta0[pp]); b_rhs1[pp] = B1[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta1[pp] + beta1[pp]*agrad_1_beta1[pp] + beta2[pp]*agrad_2_beta1[pp]); b_rhs2[pp] = B2[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta2[pp] + beta1[pp]*agrad_1_beta2[pp] + beta2[pp]*agrad_2_beta2[pp]); // Dendro: reduced ops: 39 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(b_rhs0, &__unzipOutVar[cuda::VAR::U_BETA0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(b_rhs2, &__unzipOutVar[cuda::VAR::U_BETA2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(b_rhs1, &__unzipOutVar[cuda::VAR::U_BETA1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block 
assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_b_rhs /**@brief compute gt_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_gt_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for gt_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={4,4,4}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 64; double * gt3 = __sm_base + 128; double * beta2 = __sm_base + 192; double * At1 = __sm_base + 256; double * alpha = __sm_base + 320; double * gt4 = __sm_base + 384; double * gt2 = __sm_base + 448; double * gt5 = __sm_base + 512; double * At3 = __sm_base + 576; double * At4 = __sm_base + 640; double * At0 = __sm_base + 704; double * At2 = __sm_base + 768; double * beta0 = __sm_base + 832; double * gt0 = __sm_base + 896; double * At5 = __sm_base + 960; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_2_gt5 = __sm_base + 1024; double * agrad_2_gt1 = __sm_base + 1088; double * grad_0_beta0 = __sm_base + 1152; double * agrad_0_gt0 = __sm_base + 1216; double * agrad_1_gt3 = __sm_base + 1280; double * grad_1_beta1 = __sm_base + 1344; double * grad_2_beta0 = __sm_base + 1408; double * agrad_1_gt0 = __sm_base + 1472; double * agrad_1_gt4 = __sm_base + 1536; double * grad_0_beta1 = __sm_base + 1600; double * agrad_2_gt2 = __sm_base + 1664; double * grad_2_beta2 = __sm_base + 1728; double * agrad_2_gt0 = __sm_base + 1792; double * agrad_1_gt2 = __sm_base + 1856; double * agrad_0_gt5 = __sm_base + 1920; double * agrad_1_gt5 = 
__sm_base + 1984; double * agrad_0_gt3 = __sm_base + 2048; double * agrad_0_gt2 = __sm_base + 2112; double * agrad_1_gt1 = __sm_base + 2176; double * agrad_0_gt1 = __sm_base + 2240; double * grad_2_beta1 = __sm_base + 2304; double * agrad_2_gt4 = __sm_base + 2368; double * grad_1_beta0 = __sm_base + 2432; double * agrad_2_gt3 = __sm_base + 2496; double * grad_0_beta2 = __sm_base + 2560; double * grad_1_beta2 = __sm_base + 2624; double * agrad_0_gt4 = __sm_base + 2688; // deriv vars end // output vars begin double * gt_rhs01 = __sm_base + 2752; double * gt_rhs22 = __sm_base + 2816; double * gt_rhs11 = __sm_base + 2880; double * gt_rhs00 = __sm_base + 2944; double * gt_rhs12 = __sm_base + 3008; double * gt_rhs02 = __sm_base + 3072; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
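// The remaining conformal metric components (gt2, gt5, gt0), the trace-free extrinsic
// curvature At0..At5 and the shift component beta0 are loaded next, followed by the shift
// gradients grad_j_beta_i and the advective derivatives agrad_j_gt_ab from the derivative
// workspace; together these supply the terms of the conformal metric evolution
//   gt_rhs_ij = -2*alpha*At_ij + beta^k*agrad_k gt_ij
//               + gt_kj*grad_i beta^k + gt_ik*grad_j beta^k - (2/3)*gt_ij*grad_k beta^k.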
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double 
*) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
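// Note: each grad_*/agrad_* array is read from the per-stream, per-SM workspace slot at
//   stream_id*__szPerStream + SM_ID*__maxBlkSz,
// the same offset used when __compute_derivatives stored these derivatives for the block
// currently assigned to this SM and stream.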
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 210 // Dendro: printing temp variables const double DENDRO_0 = 2*alpha[pp]; const double DENDRO_1 = grad_0_beta0[pp]; const double DENDRO_2 = (2.0L/3.0L)*gt0[pp]; const double DENDRO_3 = grad_1_beta1[pp]; const double DENDRO_4 = grad_2_beta2[pp]; const double DENDRO_5 = 2*gt1[pp]; const double DENDRO_6 = grad_0_beta1[pp]; const double DENDRO_7 = 2*gt2[pp]; const double DENDRO_8 = grad_0_beta2[pp]; const double DENDRO_9 = grad_1_beta0[pp]; const double DENDRO_10 = grad_1_beta2[pp]; const double DENDRO_11 = (1.0L/3.0L)*gt1[pp]; const double DENDRO_12 = (2.0L/3.0L)*DENDRO_4; const double DENDRO_13 = grad_2_beta0[pp]; const double DENDRO_14 = grad_2_beta1[pp]; const double DENDRO_15 = (1.0L/3.0L)*gt2[pp]; const double DENDRO_16 = (2.0L/3.0L)*DENDRO_3; const 
double DENDRO_17 = (2.0L/3.0L)*DENDRO_1; const double DENDRO_18 = 2*gt4[pp]; const double DENDRO_19 = (1.0L/3.0L)*gt4[pp]; // Dendro: printing variables gt_rhs00[pp] = -At0[pp]*DENDRO_0 + (4.0L/3.0L)*DENDRO_1*gt0[pp] - DENDRO_2*DENDRO_3 - DENDRO_2*DENDRO_4 + DENDRO_5*DENDRO_6 + DENDRO_7*DENDRO_8 + beta0[pp]*agrad_0_gt0[pp] + beta1[pp]*agrad_1_gt0[pp] + beta2[pp]*agrad_2_gt0[pp]; gt_rhs01[pp] = -At1[pp]*DENDRO_0 + DENDRO_1*DENDRO_11 + DENDRO_10*gt2[pp] + DENDRO_11*DENDRO_3 - DENDRO_12*gt1[pp] + DENDRO_6*gt3[pp] + DENDRO_8*gt4[pp] + DENDRO_9*gt0[pp] + beta0[pp]*agrad_0_gt1[pp] + beta1[pp]*agrad_1_gt1[pp] + beta2[pp]*agrad_2_gt1[pp]; gt_rhs02[pp] = -At2[pp]*DENDRO_0 + DENDRO_1*DENDRO_15 + DENDRO_13*gt0[pp] + DENDRO_14*gt1[pp] + DENDRO_15*DENDRO_4 - DENDRO_16*gt2[pp] + DENDRO_6*gt4[pp] + DENDRO_8*gt5[pp] + beta0[pp]*agrad_0_gt2[pp] + beta1[pp]*agrad_1_gt2[pp] + beta2[pp]*agrad_2_gt2[pp]; gt_rhs11[pp] = -At3[pp]*DENDRO_0 + DENDRO_10*DENDRO_18 - DENDRO_12*gt3[pp] - DENDRO_17*gt3[pp] + (4.0L/3.0L)*DENDRO_3*gt3[pp] + DENDRO_5*DENDRO_9 + beta0[pp]*agrad_0_gt3[pp] + beta1[pp]*agrad_1_gt3[pp] + beta2[pp]*agrad_2_gt3[pp]; gt_rhs12[pp] = -At4[pp]*DENDRO_0 + DENDRO_10*gt5[pp] + DENDRO_13*gt1[pp] + DENDRO_14*gt3[pp] - DENDRO_17*gt4[pp] + DENDRO_19*DENDRO_3 + DENDRO_19*DENDRO_4 + DENDRO_9*gt2[pp] + beta0[pp]*agrad_0_gt4[pp] + beta1[pp]*agrad_1_gt4[pp] + beta2[pp]*agrad_2_gt4[pp]; gt_rhs22[pp] = -At5[pp]*DENDRO_0 + DENDRO_13*DENDRO_7 + DENDRO_14*DENDRO_18 - DENDRO_16*gt5[pp] - DENDRO_17*gt5[pp] + (4.0L/3.0L)*DENDRO_4*gt5[pp] + beta0[pp]*agrad_0_gt5[pp] + beta1[pp]*agrad_1_gt5[pp] + beta2[pp]*agrad_2_gt5[pp]; // Dendro: reduced ops: 162 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(gt_rhs01, &__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs22, &__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs11, &__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs00, &__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs12, &__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs02, &__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_gt_rhs /**@brief compute chi_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: 
cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_chi_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for chi_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={7,7,7}; //input vars begin double * K = __sm_base + 0; double * beta1 = __sm_base + 343; double * beta2 = __sm_base + 686; double * alpha = __sm_base + 1029; double * beta0 = __sm_base + 1372; double * chi = __sm_base + 1715; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_chi = __sm_base + 2058; double * grad_0_beta0 = __sm_base + 2401; double * agrad_0_chi = __sm_base + 2744; double * agrad_2_chi = __sm_base + 3087; double * grad_1_beta1 = __sm_base + 3430; double * grad_2_beta2 = __sm_base + 3773; // deriv vars end // output vars begin double * chi_rhs = __sm_base + 4116; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
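// The shift divergence pieces (grad_1_beta1, grad_2_beta2, together with grad_0_beta0
// loaded above) and the advective derivatives of chi feed the conformal-factor evolution
//   chi_rhs = (2/3)*chi*( alpha*K - (grad_0_beta0 + grad_1_beta1 + grad_2_beta2) )
//             + beta0*agrad_0_chi + beta1*agrad_1_chi + beta2*agrad_2_chi,
// evaluated per tile point in the loop that follows.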
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);

__syncthreads();

if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){

double x,y,z,r_coord,eta;
unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x;

for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){

z = ptmin[2] + (k+ijk_lm[4])*dz;
y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy;
x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx;
r_coord = sqrt(x*x + y*y + z*z);
eta=ETA_CONST;
if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); }

// Dendro: {{{
// Dendro: original ops: 22
// Dendro: printing temp variables
const double DENDRO_0 = (2.0L/3.0L)*chi[pp];
// Dendro: printing variables
chi_rhs[pp] = DENDRO_0*K[pp]*alpha[pp] - DENDRO_0*(grad_0_beta0[pp] + grad_1_beta1[pp] + grad_2_beta2[pp]) + beta0[pp]*agrad_0_chi[pp] + beta1[pp]*agrad_1_chi[pp] + beta2[pp]*agrad_2_chi[pp];
// Dendro: reduced ops: 20
// Dendro: }}}

} //loop z end
}// end of the if for the thread idx

__syncthreads();

// store computed variables
cuda::__storeSharedToGlobal3D<double>(chi_rhs, &__unzipOutVar[cuda::VAR::U_CHI][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);

__syncthreads();

} // end of block assigned to gpu block loop x
} // end of block assigned to gpu block loop y
} // end of block assigned to gpu block loop z

} // end of function __compute_chi_rhs

/**@brief compute At_rhs
@param[in] __unzipInVar: unzipped input array (global memory)
@param[in] MemoryDerivs: allocated workspace for derivative computations
@param[in] __dendroBlkList: dendro block list
@param[in] __gpuBlockMap: gpu block map
@param[in] __deviceProperties: cuda device properties
@param[out] __unzipOutVar: unzipped output computed rhs
*/
__device__ void __compute_At_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// generated code for At_rhs begin
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

// bssn compute parameters
const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]};
const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]};
const double kosigma=__bssnParams->KO_DISS_SIGMA;
const double ETA_R0=__bssnParams->ETA_R0;
const double R0=__bssnParams->ETA_R0;
const double ETA_DAMPING=__bssnParams->ETA_DAMPING;
const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP;
const double ETA_CONST=__bssnParams->ETA_CONST;
const double
eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * K = __sm_base + 0; double * gt1 = __sm_base + 27; double * beta1 = __sm_base + 54; double * gt3 = __sm_base + 81; double * At1 = __sm_base + 108; double * gt5 = __sm_base + 135; double * alpha = __sm_base + 162; double * gt4 = __sm_base + 189; double * gt2 = __sm_base + 216; double * beta2 = __sm_base + 243; double * At3 = __sm_base + 270; double * At4 = __sm_base + 297; double * At0 = __sm_base + 324; double * At2 = __sm_base + 351; double * beta0 = __sm_base + 378; double * gt0 = __sm_base + 405; double * chi = __sm_base + 432; double * At5 = __sm_base + 459; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad2_0_0_gt3 = __sm_base + 486; double * grad2_2_2_alpha = __sm_base + 513; double * grad2_1_2_gt1 = __sm_base + 540; double * grad_2_gt3 = __sm_base + 567; double * grad_1_beta1 = __sm_base + 594; double * grad_0_Gt1 = __sm_base + 621; double * grad_1_gt5 = __sm_base + 648; double * grad2_0_2_gt5 = __sm_base + 675; double * grad2_1_1_alpha = __sm_base + 702; double * agrad_1_At2 = __sm_base + 729; double * grad2_0_1_gt0 = __sm_base + 756; double * agrad_2_At1 = __sm_base + 783; double * grad_1_gt1 = __sm_base + 810; double * agrad_0_At3 = __sm_base + 837; double * agrad_1_At4 = __sm_base + 864; double * grad_2_beta2 = __sm_base + 891; double * grad_0_chi = __sm_base + 918; double * agrad_2_At4 = __sm_base + 945; double * grad2_0_2_gt4 = __sm_base + 972; double * grad_1_chi = __sm_base + 999; double * grad2_0_1_gt1 = __sm_base + 1026; double * grad2_1_2_alpha = __sm_base + 1053; double * grad2_1_2_gt3 = __sm_base + 1080; double * grad2_2_2_gt1 = __sm_base + 1107; double * agrad_2_At3 = __sm_base + 1134; double * grad2_0_0_gt2 = __sm_base + 1161; double * grad_1_beta0 = __sm_base + 1188; double * grad_0_gt2 = __sm_base + 1215; double * grad_0_beta2 = __sm_base + 1242; double * grad2_2_2_gt4 = __sm_base + 1269; double * agrad_2_At5 = __sm_base + 1296; double * grad_0_gt5 = __sm_base + 1323; double * grad2_0_1_alpha = __sm_base + 1350; double * agrad_2_At2 = __sm_base + 1377; double * grad_0_Gt2 = __sm_base + 1404; double * grad_0_gt4 = __sm_base + 1431; double * grad2_2_2_chi = __sm_base + 1458; double * grad2_0_2_gt3 = __sm_base + 1485; double * agrad_1_At1 = __sm_base + 1512; double * grad2_0_0_gt4 = __sm_base + 1539; double * grad_0_gt1 = __sm_base + 1566; double * grad2_0_0_gt0 = __sm_base + 1593; double * agrad_0_At4 = __sm_base + 1620; double * grad2_1_1_gt4 = __sm_base + 1647; double * grad2_0_2_chi = __sm_base + 1674; double * grad2_0_1_chi = __sm_base + 1701; double * grad2_0_2_gt1 = __sm_base + 1728; double * agrad_0_At2 = __sm_base + 1755; double * grad2_0_0_gt5 = __sm_base + 1782; double * grad_2_Gt2 = __sm_base + 1809; double * grad_1_Gt2 = __sm_base + 1836; double * agrad_0_At0 = __sm_base + 1863; double * grad_0_gt3 = __sm_base + 1890; double * grad_2_beta1 = __sm_base + 1917; double * 
grad_1_gt3 = __sm_base + 1944; double * grad2_1_1_gt3 = __sm_base + 1971; double * grad2_0_2_alpha = __sm_base + 1998; double * grad2_0_1_gt5 = __sm_base + 2025; double * agrad_2_At0 = __sm_base + 2052; double * grad2_2_2_gt0 = __sm_base + 2079; double * grad_1_gt2 = __sm_base + 2106; double * grad2_0_0_gt1 = __sm_base + 2133; double * grad2_0_1_gt3 = __sm_base + 2160; double * grad_2_Gt0 = __sm_base + 2187; double * grad_1_alpha = __sm_base + 2214; double * grad2_1_2_gt4 = __sm_base + 2241; double * grad2_1_1_gt5 = __sm_base + 2268; double * grad_2_gt4 = __sm_base + 2295; double * grad2_2_2_gt5 = __sm_base + 2322; double * grad_2_gt2 = __sm_base + 2349; double * agrad_1_At0 = __sm_base + 2376; double * grad2_2_2_gt3 = __sm_base + 2403; double * grad_2_gt1 = __sm_base + 2430; double * grad2_0_2_gt2 = __sm_base + 2457; double * grad_1_gt0 = __sm_base + 2484; double * grad_0_beta0 = __sm_base + 2511; double * grad_1_Gt0 = __sm_base + 2538; double * grad2_1_2_gt5 = __sm_base + 2565; double * grad_2_gt0 = __sm_base + 2592; double * grad_2_Gt1 = __sm_base + 2619; double * grad2_1_1_gt2 = __sm_base + 2646; double * grad2_2_2_gt2 = __sm_base + 2673; double * grad_2_alpha = __sm_base + 2700; double * agrad_1_At5 = __sm_base + 2727; double * grad_2_beta0 = __sm_base + 2754; double * grad_1_gt4 = __sm_base + 2781; double * grad2_1_1_gt0 = __sm_base + 2808; double * grad2_0_2_gt0 = __sm_base + 2835; double * grad_0_beta1 = __sm_base + 2862; double * grad_0_alpha = __sm_base + 2889; double * grad_1_Gt1 = __sm_base + 2916; double * grad2_1_2_gt0 = __sm_base + 2943; double * grad2_0_0_alpha = __sm_base + 2970; double * grad2_0_1_gt2 = __sm_base + 2997; double * grad_0_gt0 = __sm_base + 3024; double * grad2_1_2_gt2 = __sm_base + 3051; double * grad_2_gt5 = __sm_base + 3078; double * agrad_0_At1 = __sm_base + 3105; double * agrad_1_At3 = __sm_base + 3132; double * grad_2_chi = __sm_base + 3159; double * grad2_0_0_chi = __sm_base + 3186; double * agrad_0_At5 = __sm_base + 3213; double * grad2_1_1_gt1 = __sm_base + 3240; double * grad_0_Gt0 = __sm_base + 3267; double * grad2_1_1_chi = __sm_base + 3294; double * grad2_0_1_gt4 = __sm_base + 3321; double * grad2_1_2_chi = __sm_base + 3348; double * grad_1_beta2 = __sm_base + 3375; // deriv vars end // output vars begin double * At_rhs12 = __sm_base + 3402; double * At_rhs11 = __sm_base + 3429; double * At_rhs22 = __sm_base + 3456; double * At_rhs02 = __sm_base + 3483; double * At_rhs00 = __sm_base + 3510; double * At_rhs01 = __sm_base + 3537; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
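// NOTE: At_rhs stages a 3x3x3 tile (27 doubles per field) instead of the 7x7x7 tile used for
// chi_rhs because far more fields must be resident at once: 18 inputs, 108 derivatives and
// 6 outputs as laid out above, i.e. 132*27 = 3564 doubles (~28 KB) of the shared-memory region
// addressed through __sm_base (presumably the block's dynamic shared-memory allocation).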
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad2_1_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
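// NOTE (inferred from the indexing): each derivative array in __derivWorkspace is read at
// base + stream_id*__szPerStream + SM_ID*__maxBlkSz, i.e. the workspace is partitioned per
// CUDA stream, and within a stream each SM (SM_ID from get_smid()) owns a scratch slab sized
// for the largest dendro block, so blocks resident on different SMs do not read each other's
// precomputed derivatives.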
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At2,(const unsigned 
int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
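// NOTE (inferred from the call signature): __loadGlobalToShared3D copies the sub-box
// [ijk_lm[0],ijk_lm[1]] x [ijk_lm[2],ijk_lm[3]] x [ijk_lm[4],ijk_lm[5]] of the padded global
// block (extent alignedSz) into a compact shared tile of extent tile_sz, which is why the same
// ijk_lm bounds are reused verbatim by every load in this sequence.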
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt3,(const unsigned int *) 
ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
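// NOTE: the remaining derivative tiles are staged below, followed by __syncthreads(); the
// barrier is required because the loads are performed cooperatively by the thread block
// (presumably each thread copies a subset of each tile), so no thread may enter the RHS loop
// until every staged field is complete in shared memory.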
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( 
(ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 630012 // Dendro: printing temp variables const double DENDRO_0 = grad_0_beta0[pp]; const double DENDRO_1 = (2.0L/3.0L)*At0[pp]; const double DENDRO_2 = grad_1_beta1[pp]; const double DENDRO_3 = grad_2_beta2[pp]; const double DENDRO_4 = 2*At1[pp]; const double DENDRO_5 = grad_0_beta1[pp]; const double DENDRO_6 = 2*At2[pp]; const double DENDRO_7 = grad_0_beta2[pp]; const double DENDRO_8 = pow(gt4[pp], 2); const double DENDRO_9 = DENDRO_8*gt0[pp]; const double DENDRO_10 = pow(gt1[pp], 2); const double DENDRO_11 = DENDRO_10*gt5[pp]; const double DENDRO_12 = pow(gt2[pp], 2); const double DENDRO_13 = DENDRO_12*gt3[pp]; const double DENDRO_14 = gt0[pp]*gt3[pp]; const double DENDRO_15 = DENDRO_14*gt5[pp]; const double DENDRO_16 = gt1[pp]*gt2[pp]; const double DENDRO_17 = 2*DENDRO_16*gt4[pp]; const double DENDRO_18 = DENDRO_11 + DENDRO_13 - DENDRO_15 - DENDRO_17 + DENDRO_9; const double DENDRO_19 = 1.0/DENDRO_18; const double DENDRO_20 = 2*At1[pp]*DENDRO_19; const double DENDRO_21 = gt1[pp]*gt5[pp]; const double DENDRO_22 = gt2[pp]*gt4[pp]; const double DENDRO_23 = DENDRO_21 - DENDRO_22; const double DENDRO_24 = gt0[pp]*gt4[pp]; const double DENDRO_25 = -DENDRO_16 + DENDRO_24; const double DENDRO_26 = -DENDRO_12 + gt0[pp]*gt5[pp]; const double DENDRO_27 = 2*At0[pp]*DENDRO_19; const double DENDRO_28 = At1[pp]*DENDRO_23; const double DENDRO_29 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_30 = -At2[pp]*DENDRO_29; const double DENDRO_31 = -DENDRO_8 + gt3[pp]*gt5[pp]; const double DENDRO_32 = 2*At2[pp]*DENDRO_19; const double DENDRO_33 = -DENDRO_10 + DENDRO_14; const double DENDRO_34 = (1.0L/12.0L)*chi[pp]; const double DENDRO_35 = grad2_0_0_alpha[pp]; const double DENDRO_36 = grad_1_alpha[pp]; const double DENDRO_37 = 1.0/chi[pp]; const double DENDRO_38 = grad_2_chi[pp]; const double DENDRO_39 = grad_0_chi[pp]; const double DENDRO_40 = grad_1_chi[pp]; const double DENDRO_41 = DENDRO_26*DENDRO_40; const double DENDRO_42 = DENDRO_23*DENDRO_39 + DENDRO_25*DENDRO_38 - DENDRO_41; const double DENDRO_43 = 0.5*DENDRO_37*DENDRO_42; const double DENDRO_44 = grad_0_gt0[pp]; const double DENDRO_45 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_46 = grad_0_gt2[pp]; const double DENDRO_47 = 1.0*DENDRO_46; const double DENDRO_48 = grad_2_gt0[pp]; const double DENDRO_49 = 0.5*DENDRO_48; const double DENDRO_50 = DENDRO_47 - DENDRO_49; const double DENDRO_51 = grad_0_gt1[pp]; const double DENDRO_52 = 1.0*DENDRO_51; const double DENDRO_53 = grad_1_gt0[pp]; const double DENDRO_54 = 0.5*DENDRO_53; const double DENDRO_55 = DENDRO_52 - DENDRO_54; const double DENDRO_56 = DENDRO_26*DENDRO_55; const double DENDRO_57 = DENDRO_25*DENDRO_50 + DENDRO_44*DENDRO_45 - DENDRO_56; const double DENDRO_58 = DENDRO_19*DENDRO_36*(DENDRO_43*gt0[pp] + DENDRO_57); const double DENDRO_59 = grad_2_alpha[pp]; const double DENDRO_60 = 12*DENDRO_19*DENDRO_59; const double DENDRO_61 = DENDRO_25*DENDRO_40; const double DENDRO_62 = DENDRO_29*DENDRO_39; const double DENDRO_63 = DENDRO_33*DENDRO_38; const double DENDRO_64 = DENDRO_61 - DENDRO_62 - DENDRO_63; const double DENDRO_65 = 0.5*DENDRO_37*DENDRO_64; const double DENDRO_66 = DENDRO_65*gt0[pp]; const double DENDRO_67 = DENDRO_25*DENDRO_55; const double DENDRO_68 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_69 = DENDRO_44*DENDRO_68; const double DENDRO_70 = DENDRO_33*DENDRO_50; const double DENDRO_71 = DENDRO_69 + DENDRO_70; const double DENDRO_72 = -DENDRO_67 + 
DENDRO_71; const double DENDRO_73 = grad_0_alpha[pp]; const double DENDRO_74 = 0.5*DENDRO_31*DENDRO_44; const double DENDRO_75 = DENDRO_19*DENDRO_74; const double DENDRO_76 = DENDRO_29*DENDRO_50; const double DENDRO_77 = DENDRO_19*DENDRO_76; const double DENDRO_78 = DENDRO_23*DENDRO_55; const double DENDRO_79 = DENDRO_19*DENDRO_78; const double DENDRO_80 = 1.0/(-DENDRO_11 - DENDRO_13 + DENDRO_15 + DENDRO_17 - DENDRO_9); const double DENDRO_81 = -DENDRO_21 + DENDRO_22; const double DENDRO_82 = DENDRO_29*DENDRO_38; const double DENDRO_83 = DENDRO_31*DENDRO_39; const double DENDRO_84 = DENDRO_82 + DENDRO_83; const double DENDRO_85 = DENDRO_40*DENDRO_81 + DENDRO_84; const double DENDRO_86 = DENDRO_37*(-1.0*DENDRO_39 + 0.5*DENDRO_80*DENDRO_85*gt0[pp]); const double DENDRO_87 = 3*alpha[pp]; const double DENDRO_88 = grad_0_Gt0[pp]; const double DENDRO_89 = 4*gt1[pp]; const double DENDRO_90 = grad_0_Gt1[pp]; const double DENDRO_91 = 4*gt2[pp]; const double DENDRO_92 = grad_0_Gt2[pp]; const double DENDRO_93 = pow(chi[pp], -2); const double DENDRO_94 = pow(DENDRO_39, 2); const double DENDRO_95 = 4.0*DENDRO_19*DENDRO_25; const double DENDRO_96 = 4*DENDRO_19*DENDRO_29; const double DENDRO_97 = 4.0*DENDRO_19*DENDRO_23; const double DENDRO_98 = 2.0*DENDRO_19*DENDRO_33; const double DENDRO_99 = 2.0*DENDRO_19*DENDRO_26; const double DENDRO_100 = 2.0*DENDRO_19*DENDRO_31; const double DENDRO_101 = pow(DENDRO_18, -2); const double DENDRO_102 = 4*DENDRO_101*DENDRO_33; const double DENDRO_103 = grad_0_gt4[pp]; const double DENDRO_104 = 0.25*DENDRO_103; const double DENDRO_105 = -DENDRO_104; const double DENDRO_106 = grad_1_gt2[pp]; const double DENDRO_107 = 0.25*DENDRO_106; const double DENDRO_108 = grad_2_gt1[pp]; const double DENDRO_109 = 0.75*DENDRO_108; const double DENDRO_110 = grad_0_gt5[pp]; const double DENDRO_111 = DENDRO_103 - DENDRO_106 + DENDRO_108; const double DENDRO_112 = DENDRO_111*DENDRO_26; const double DENDRO_113 = DENDRO_110*DENDRO_25 - DENDRO_112 + DENDRO_23*DENDRO_48; const double DENDRO_114 = 4*DENDRO_101; const double DENDRO_115 = 2.0*DENDRO_101*DENDRO_23; const double DENDRO_116 = DENDRO_23*DENDRO_53; const double DENDRO_117 = grad_0_gt3[pp]; const double DENDRO_118 = DENDRO_117*DENDRO_26; const double DENDRO_119 = DENDRO_103 + DENDRO_106 - DENDRO_108; const double DENDRO_120 = DENDRO_119*DENDRO_25; const double DENDRO_121 = DENDRO_116 - DENDRO_118 + DENDRO_120; const double DENDRO_122 = DENDRO_117*DENDRO_57; const double DENDRO_123 = 4*DENDRO_101*DENDRO_25; const double DENDRO_124 = 0.25*DENDRO_117; const double DENDRO_125 = DENDRO_113*DENDRO_124; const double DENDRO_126 = -DENDRO_103 + DENDRO_106 + DENDRO_108; const double DENDRO_127 = DENDRO_121*DENDRO_126; const double DENDRO_128 = 2.0*DENDRO_101*DENDRO_29; const double DENDRO_129 = 4*DENDRO_101*DENDRO_29; const double DENDRO_130 = 0.25*DENDRO_53; const double DENDRO_131 = -DENDRO_130; const double DENDRO_132 = DENDRO_131 + 0.5*DENDRO_51; const double DENDRO_133 = DENDRO_113*DENDRO_132; const double DENDRO_134 = 4*DENDRO_101*DENDRO_23; const double DENDRO_135 = DENDRO_121*DENDRO_132; const double DENDRO_136 = 0.5*DENDRO_117; const double DENDRO_137 = grad_1_gt1[pp]; const double DENDRO_138 = 1.0*DENDRO_137; const double DENDRO_139 = -DENDRO_138; const double DENDRO_140 = DENDRO_136 + DENDRO_139; const double DENDRO_141 = DENDRO_100*grad2_0_0_gt0[pp] - DENDRO_102*DENDRO_113*(DENDRO_105 + DENDRO_107 + DENDRO_109) - DENDRO_114*DENDRO_31*DENDRO_57*(DENDRO_52 + DENDRO_54) + DENDRO_115*(DENDRO_121*DENDRO_53 + DENDRO_122) + 
DENDRO_123*(DENDRO_125 + 0.5*DENDRO_127) - DENDRO_128*(DENDRO_111*DENDRO_57 + DENDRO_113*DENDRO_53) - DENDRO_129*(DENDRO_126*DENDRO_57 + DENDRO_133) + DENDRO_134*(DENDRO_135 - 2*DENDRO_140*DENDRO_57) + 4*DENDRO_88*gt0[pp] + DENDRO_89*DENDRO_90 + DENDRO_91*DENDRO_92 - DENDRO_93*DENDRO_94 - DENDRO_95*grad2_1_2_gt0[pp] + DENDRO_96*grad2_0_2_gt0[pp] - DENDRO_97*grad2_0_1_gt0[pp] + DENDRO_98*grad2_2_2_gt0[pp] + DENDRO_99*grad2_1_1_gt0[pp]; const double DENDRO_142 = 3.0*DENDRO_101*DENDRO_33; const double DENDRO_143 = DENDRO_111*DENDRO_23; const double DENDRO_144 = DENDRO_110*DENDRO_29; const double DENDRO_145 = DENDRO_31*DENDRO_48; const double DENDRO_146 = DENDRO_144 + DENDRO_145; const double DENDRO_147 = -DENDRO_143 + DENDRO_146; const double DENDRO_148 = DENDRO_147*DENDRO_48; const double DENDRO_149 = 3.0*DENDRO_101*DENDRO_26; const double DENDRO_150 = DENDRO_117*DENDRO_23; const double DENDRO_151 = DENDRO_31*DENDRO_53; const double DENDRO_152 = DENDRO_119*DENDRO_29; const double DENDRO_153 = DENDRO_151 + DENDRO_152; const double DENDRO_154 = -DENDRO_150 + DENDRO_153; const double DENDRO_155 = DENDRO_154*DENDRO_53; const double DENDRO_156 = 4*DENDRO_101*DENDRO_121*DENDRO_26; const double DENDRO_157 = 6.0*DENDRO_101*DENDRO_44; const double DENDRO_158 = DENDRO_74 + DENDRO_76; const double DENDRO_159 = DENDRO_158 - DENDRO_78; const double DENDRO_160 = 0.25*DENDRO_110; const double DENDRO_161 = grad_2_gt2[pp]; const double DENDRO_162 = 1.0*DENDRO_161; const double DENDRO_163 = -DENDRO_162; const double DENDRO_164 = DENDRO_29*DENDRO_48; const double DENDRO_165 = DENDRO_110*DENDRO_33; const double DENDRO_166 = DENDRO_111*DENDRO_25; const double DENDRO_167 = -DENDRO_164 - DENDRO_165 + DENDRO_166; const double DENDRO_168 = 4*DENDRO_101*DENDRO_167*DENDRO_33; const double DENDRO_169 = DENDRO_117*DENDRO_25; const double DENDRO_170 = DENDRO_29*DENDRO_53; const double DENDRO_171 = DENDRO_119*DENDRO_33; const double DENDRO_172 = DENDRO_170 + DENDRO_171; const double DENDRO_173 = -DENDRO_169 + DENDRO_172; const double DENDRO_174 = 0.75*DENDRO_106; const double DENDRO_175 = 0.25*DENDRO_108; const double DENDRO_176 = 4*DENDRO_101*DENDRO_26*(DENDRO_105 + DENDRO_174 + DENDRO_175); const double DENDRO_177 = 4*DENDRO_101*DENDRO_31; const double DENDRO_178 = DENDRO_47 + DENDRO_49; const double DENDRO_179 = 0.25*DENDRO_48; const double DENDRO_180 = DENDRO_154*DENDRO_179; const double DENDRO_181 = DENDRO_130*DENDRO_147; const double DENDRO_182 = DENDRO_164 + DENDRO_165; const double DENDRO_183 = -DENDRO_166 + DENDRO_182; const double DENDRO_184 = DENDRO_110*DENDRO_72; const double DENDRO_185 = DENDRO_147*DENDRO_44; const double DENDRO_186 = DENDRO_159*DENDRO_48; const double DENDRO_187 = DENDRO_154*DENDRO_44; const double DENDRO_188 = DENDRO_159*DENDRO_53; const double DENDRO_189 = 0.25*DENDRO_185; const double DENDRO_190 = 0.25*DENDRO_187; const double DENDRO_191 = DENDRO_160*DENDRO_173; const double DENDRO_192 = DENDRO_126*DENDRO_183; const double DENDRO_193 = DENDRO_119*DENDRO_72; const double DENDRO_194 = DENDRO_113*DENDRO_140; const double DENDRO_195 = DENDRO_111*DENDRO_121; const double DENDRO_196 = 0.25*DENDRO_195; const double DENDRO_197 = 0.5*DENDRO_110; const double DENDRO_198 = DENDRO_163 + DENDRO_197; const double DENDRO_199 = DENDRO_169 - DENDRO_170 - DENDRO_171; const double DENDRO_200 = DENDRO_198*DENDRO_199; const double DENDRO_201 = DENDRO_119*DENDRO_167; const double DENDRO_202 = 0.25*DENDRO_201; const double DENDRO_203 = -DENDRO_202; const double DENDRO_204 = -DENDRO_179; const double DENDRO_205 
= DENDRO_204 + 0.5*DENDRO_46; const double DENDRO_206 = DENDRO_110 - 2.0*DENDRO_161; const double DENDRO_207 = 2*DENDRO_37; const double DENDRO_208 = grad2_0_0_chi[pp]; const double DENDRO_209 = -DENDRO_208; const double DENDRO_210 = DENDRO_19*DENDRO_38; const double DENDRO_211 = DENDRO_67 - DENDRO_69 - DENDRO_70; const double DENDRO_212 = DENDRO_19*DENDRO_40; const double DENDRO_213 = DENDRO_19*DENDRO_39; const double DENDRO_214 = -DENDRO_74 - DENDRO_76 + DENDRO_78; const double DENDRO_215 = DENDRO_113*DENDRO_29; const double DENDRO_216 = grad_1_gt5[pp]; const double DENDRO_217 = DENDRO_216*DENDRO_25; const double DENDRO_218 = grad_2_gt3[pp]; const double DENDRO_219 = DENDRO_218*DENDRO_26; const double DENDRO_220 = DENDRO_126*DENDRO_23; const double DENDRO_221 = DENDRO_217 - DENDRO_219 + DENDRO_220; const double DENDRO_222 = DENDRO_221*DENDRO_25; const double DENDRO_223 = DENDRO_121*DENDRO_23; const double DENDRO_224 = grad_2_gt5[pp]; const double DENDRO_225 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_226 = 0.5*DENDRO_216; const double DENDRO_227 = grad_2_gt4[pp]; const double DENDRO_228 = 1.0*DENDRO_227; const double DENDRO_229 = -DENDRO_228; const double DENDRO_230 = DENDRO_226 + DENDRO_229; const double DENDRO_231 = -DENDRO_198*DENDRO_23 + DENDRO_224*DENDRO_225 + DENDRO_230*DENDRO_26; const double DENDRO_232 = DENDRO_231*DENDRO_33; const double DENDRO_233 = grad_1_gt3[pp]; const double DENDRO_234 = 0.5*DENDRO_233*DENDRO_26; const double DENDRO_235 = grad_1_gt4[pp]; const double DENDRO_236 = 1.0*DENDRO_235; const double DENDRO_237 = 0.5*DENDRO_218; const double DENDRO_238 = DENDRO_236 - DENDRO_237; const double DENDRO_239 = DENDRO_238*DENDRO_25; const double DENDRO_240 = DENDRO_140*DENDRO_23; const double DENDRO_241 = -DENDRO_234 + DENDRO_239 - DENDRO_240; const double DENDRO_242 = DENDRO_241*DENDRO_26; const double DENDRO_243 = DENDRO_31*DENDRO_57; const double DENDRO_244 = 2.0*DENDRO_101*(DENDRO_215 - 1.0*DENDRO_222 - 1.0*DENDRO_223 + DENDRO_232 + DENDRO_242 + DENDRO_243); const double DENDRO_245 = DENDRO_167*DENDRO_29; const double DENDRO_246 = DENDRO_218*DENDRO_25; const double DENDRO_247 = DENDRO_216*DENDRO_33; const double DENDRO_248 = DENDRO_126*DENDRO_29; const double DENDRO_249 = DENDRO_246 - DENDRO_247 - DENDRO_248; const double DENDRO_250 = DENDRO_249*DENDRO_25; const double DENDRO_251 = DENDRO_199*DENDRO_23; const double DENDRO_252 = 0.5*DENDRO_224*DENDRO_33; const double DENDRO_253 = DENDRO_198*DENDRO_29; const double DENDRO_254 = DENDRO_230*DENDRO_25; const double DENDRO_255 = -DENDRO_252 + DENDRO_253 - DENDRO_254; const double DENDRO_256 = DENDRO_255*DENDRO_33; const double DENDRO_257 = DENDRO_238*DENDRO_33; const double DENDRO_258 = DENDRO_140*DENDRO_29 + DENDRO_225*DENDRO_233 - DENDRO_257; const double DENDRO_259 = DENDRO_258*DENDRO_26; const double DENDRO_260 = DENDRO_211*DENDRO_31; const double DENDRO_261 = 2.0*DENDRO_101*(DENDRO_245 - 1.0*DENDRO_250 - 1.0*DENDRO_251 + DENDRO_256 + DENDRO_259 + DENDRO_260); const double DENDRO_262 = DENDRO_143 - DENDRO_144 - DENDRO_145; const double DENDRO_263 = DENDRO_262*DENDRO_29; const double DENDRO_264 = DENDRO_218*DENDRO_23; const double DENDRO_265 = DENDRO_216*DENDRO_29; const double DENDRO_266 = DENDRO_126*DENDRO_31; const double DENDRO_267 = DENDRO_264 - DENDRO_265 - DENDRO_266; const double DENDRO_268 = DENDRO_25*DENDRO_267; const double DENDRO_269 = DENDRO_150 - DENDRO_151 - DENDRO_152; const double DENDRO_270 = DENDRO_23*DENDRO_269; const double DENDRO_271 = DENDRO_224*DENDRO_68; const double 
DENDRO_272 = DENDRO_23*DENDRO_230; const double DENDRO_273 = DENDRO_198*DENDRO_31; const double DENDRO_274 = -DENDRO_271 - DENDRO_272 + DENDRO_273; const double DENDRO_275 = DENDRO_274*DENDRO_33; const double DENDRO_276 = DENDRO_238*DENDRO_29; const double DENDRO_277 = DENDRO_140*DENDRO_31 + DENDRO_233*DENDRO_45 - DENDRO_276; const double DENDRO_278 = DENDRO_26*DENDRO_277; const double DENDRO_279 = DENDRO_214*DENDRO_31; const double DENDRO_280 = 2.0*DENDRO_101*(DENDRO_263 - 1.0*DENDRO_268 - 1.0*DENDRO_270 + DENDRO_275 + DENDRO_278 + DENDRO_279); const double DENDRO_281 = grad2_2_2_chi[pp]; const double DENDRO_282 = 3*DENDRO_37; const double DENDRO_283 = pow(DENDRO_38, 2); const double DENDRO_284 = DENDRO_33*(2*DENDRO_281 - DENDRO_282*DENDRO_283); const double DENDRO_285 = grad2_1_1_chi[pp]; const double DENDRO_286 = pow(DENDRO_40, 2); const double DENDRO_287 = DENDRO_26*(-DENDRO_282*DENDRO_286 + 2*DENDRO_285); const double DENDRO_288 = DENDRO_31*(2*DENDRO_208 - DENDRO_282*DENDRO_94); const double DENDRO_289 = grad2_1_2_chi[pp]; const double DENDRO_290 = DENDRO_38*DENDRO_40; const double DENDRO_291 = 2*DENDRO_25*(-DENDRO_282*DENDRO_290 + 2*DENDRO_289); const double DENDRO_292 = grad2_0_2_chi[pp]; const double DENDRO_293 = 3*DENDRO_37*DENDRO_39; const double DENDRO_294 = 2*DENDRO_29*(2*DENDRO_292 - DENDRO_293*DENDRO_38); const double DENDRO_295 = grad2_0_1_chi[pp]; const double DENDRO_296 = 2*DENDRO_23*(-DENDRO_293*DENDRO_40 + 2*DENDRO_295); const double DENDRO_297 = 2*DENDRO_19; const double DENDRO_298 = -1.0*DENDRO_215 + DENDRO_222 + DENDRO_223 - DENDRO_232 - DENDRO_242 - DENDRO_243; const double DENDRO_299 = DENDRO_297*DENDRO_298*DENDRO_40; const double DENDRO_300 = -1.0*DENDRO_245 + DENDRO_250 + DENDRO_251 - DENDRO_256 - DENDRO_259 - DENDRO_260; const double DENDRO_301 = DENDRO_297*DENDRO_300*DENDRO_38; const double DENDRO_302 = -1.0*DENDRO_263 + DENDRO_268 + DENDRO_270 - DENDRO_275 - DENDRO_278 - DENDRO_279; const double DENDRO_303 = DENDRO_297*DENDRO_302*DENDRO_39; const double DENDRO_304 = DENDRO_19*DENDRO_37*(DENDRO_284 + DENDRO_287 + DENDRO_288 - DENDRO_291 + DENDRO_294 - DENDRO_296 + DENDRO_299 + DENDRO_301 + DENDRO_303); const double DENDRO_305 = grad2_2_2_alpha[pp]; const double DENDRO_306 = DENDRO_19*DENDRO_36*(DENDRO_231 + DENDRO_43*gt5[pp]); const double DENDRO_307 = 4*DENDRO_19*DENDRO_73; const double DENDRO_308 = 0.5*gt5[pp]; const double DENDRO_309 = DENDRO_23*DENDRO_40; const double DENDRO_310 = DENDRO_309 - DENDRO_82 - DENDRO_83; const double DENDRO_311 = DENDRO_308*DENDRO_310*DENDRO_37; const double DENDRO_312 = DENDRO_19*DENDRO_252; const double DENDRO_313 = DENDRO_19*DENDRO_253; const double DENDRO_314 = DENDRO_19*DENDRO_254; const double DENDRO_315 = DENDRO_16 - DENDRO_24; const double DENDRO_316 = DENDRO_62 + DENDRO_63; const double DENDRO_317 = DENDRO_315*DENDRO_40 + DENDRO_316; const double DENDRO_318 = DENDRO_37*(DENDRO_308*DENDRO_317*DENDRO_80 - 1.0*DENDRO_38); const double DENDRO_319 = grad_2_Gt0[pp]; const double DENDRO_320 = 4*gt4[pp]; const double DENDRO_321 = grad_2_Gt1[pp]; const double DENDRO_322 = grad_2_Gt2[pp]; const double DENDRO_323 = 4*DENDRO_101*DENDRO_26; const double DENDRO_324 = 0.25*DENDRO_218; const double DENDRO_325 = -DENDRO_324; const double DENDRO_326 = 0.75*DENDRO_103; const double DENDRO_327 = -DENDRO_175; const double DENDRO_328 = 2.0*DENDRO_101*DENDRO_25; const double DENDRO_329 = DENDRO_218*DENDRO_231; const double DENDRO_330 = DENDRO_113*DENDRO_324; const double DENDRO_331 = DENDRO_119*DENDRO_221; const double DENDRO_332 = 
DENDRO_113*DENDRO_238; const double DENDRO_333 = DENDRO_111*DENDRO_221; const double DENDRO_334 = 0.25*DENDRO_333; const double DENDRO_335 = 0.25*DENDRO_216; const double DENDRO_336 = -0.5*DENDRO_227 + DENDRO_335; const double DENDRO_337 = -DENDRO_221*DENDRO_336; const double DENDRO_338 = DENDRO_100*grad2_0_0_gt5[pp] - DENDRO_113*DENDRO_177*(DENDRO_107 + DENDRO_326 + DENDRO_327) + DENDRO_123*(2*DENDRO_231*DENDRO_238 + DENDRO_337) - DENDRO_128*(DENDRO_111*DENDRO_231 + DENDRO_113*DENDRO_216) + DENDRO_134*(DENDRO_330 + 0.5*DENDRO_331) + DENDRO_134*(DENDRO_332 + DENDRO_334) - DENDRO_221*DENDRO_323*(DENDRO_236 + DENDRO_325) - DENDRO_283*DENDRO_93 + DENDRO_319*DENDRO_91 + DENDRO_320*DENDRO_321 + 4*DENDRO_322*gt5[pp] + DENDRO_328*(DENDRO_216*DENDRO_221 + DENDRO_329) - DENDRO_95*grad2_1_2_gt5[pp] + DENDRO_96*grad2_0_2_gt5[pp] - DENDRO_97*grad2_0_1_gt5[pp] + DENDRO_98*grad2_2_2_gt5[pp] + DENDRO_99*grad2_1_1_gt5[pp]; const double DENDRO_339 = DENDRO_216*DENDRO_249; const double DENDRO_340 = 3.0*DENDRO_101*DENDRO_31; const double DENDRO_341 = DENDRO_110*DENDRO_167; const double DENDRO_342 = 6.0*DENDRO_101*DENDRO_224; const double DENDRO_343 = 4*DENDRO_101*DENDRO_31*(DENDRO_204 + DENDRO_47); const double DENDRO_344 = 4*DENDRO_101*DENDRO_26*(DENDRO_104 + DENDRO_174 + DENDRO_327); const double DENDRO_345 = 4*DENDRO_101*DENDRO_274*DENDRO_33; const double DENDRO_346 = 4*DENDRO_101*DENDRO_231*DENDRO_33; const double DENDRO_347 = DENDRO_167*DENDRO_335; const double DENDRO_348 = DENDRO_160*DENDRO_249; const double DENDRO_349 = DENDRO_224*DENDRO_249; const double DENDRO_350 = DENDRO_216*DENDRO_255; const double DENDRO_351 = DENDRO_167*DENDRO_224; const double DENDRO_352 = DENDRO_110*DENDRO_255; const double DENDRO_353 = DENDRO_274*DENDRO_48; const double DENDRO_354 = 0.25*DENDRO_224; const double DENDRO_355 = DENDRO_249*DENDRO_354; const double DENDRO_356 = DENDRO_167*DENDRO_354; const double DENDRO_357 = DENDRO_179*DENDRO_267; const double DENDRO_358 = DENDRO_119*DENDRO_262; const double DENDRO_359 = DENDRO_126*DENDRO_274; const double DENDRO_360 = DENDRO_126*DENDRO_262; const double DENDRO_361 = 0.25*DENDRO_360; const double DENDRO_362 = DENDRO_113*DENDRO_336; const double DENDRO_363 = -DENDRO_362; const double DENDRO_364 = DENDRO_119*DENDRO_231; const double DENDRO_365 = DENDRO_160 - 0.5*DENDRO_161; const double DENDRO_366 = DENDRO_267*DENDRO_365; const double DENDRO_367 = -DENDRO_366; const double DENDRO_368 = DENDRO_119*DENDRO_274; const double DENDRO_369 = -DENDRO_262*DENDRO_365; const double DENDRO_370 = 2.0*DENDRO_46 - 1.0*DENDRO_48; const double DENDRO_371 = -DENDRO_281; const double DENDRO_372 = DENDRO_38*DENDRO_80; const double DENDRO_373 = -DENDRO_226; const double DENDRO_374 = DENDRO_228 + DENDRO_373; const double DENDRO_375 = -DENDRO_197; const double DENDRO_376 = DENDRO_162 + DENDRO_375; const double DENDRO_377 = DENDRO_40*DENDRO_80; const double DENDRO_378 = -0.5*gt0[pp]*gt4[pp] + 0.5*gt1[pp]*gt2[pp]; const double DENDRO_379 = DENDRO_39*DENDRO_80; const double DENDRO_380 = 2.0*DENDRO_101*DENDRO_298; const double DENDRO_381 = 2.0*DENDRO_101*DENDRO_300; const double DENDRO_382 = 2.0*DENDRO_101*DENDRO_302; const double DENDRO_383 = DENDRO_19*DENDRO_37*(-DENDRO_284 - DENDRO_287 - DENDRO_288 + DENDRO_291 - DENDRO_294 + DENDRO_296 - DENDRO_299 - DENDRO_301 - DENDRO_303); const double DENDRO_384 = grad2_1_1_alpha[pp]; const double DENDRO_385 = 4*DENDRO_19*DENDRO_59; const double DENDRO_386 = 0.5*DENDRO_37*gt3[pp]; const double DENDRO_387 = -1.0*DENDRO_40; const double DENDRO_388 = 0.5*gt3[pp]; const 
double DENDRO_389 = DENDRO_315*DENDRO_38 + DENDRO_39*DENDRO_81 + DENDRO_41; const double DENDRO_390 = -DENDRO_19*DENDRO_234 + DENDRO_19*DENDRO_239 - DENDRO_19*DENDRO_240; const double DENDRO_391 = grad_1_Gt0[pp]; const double DENDRO_392 = grad_1_Gt1[pp]; const double DENDRO_393 = grad_1_Gt2[pp]; const double DENDRO_394 = DENDRO_218*DENDRO_221; const double DENDRO_395 = DENDRO_117*DENDRO_121; const double DENDRO_396 = DENDRO_121*DENDRO_324; const double DENDRO_397 = DENDRO_124*DENDRO_221; const double DENDRO_398 = DENDRO_100*grad2_0_0_gt3[pp] - DENDRO_114*DENDRO_258*DENDRO_26*(DENDRO_236 + DENDRO_237) - DENDRO_129*(DENDRO_121*DENDRO_237 + DENDRO_397) - DENDRO_129*(DENDRO_136*DENDRO_221 + DENDRO_396) - DENDRO_142*DENDRO_394 - DENDRO_286*DENDRO_93 + DENDRO_320*DENDRO_393 - DENDRO_340*DENDRO_395 + DENDRO_391*DENDRO_89 + 4*DENDRO_392*gt3[pp] - DENDRO_95*grad2_1_2_gt3[pp] + DENDRO_96*grad2_0_2_gt3[pp] - DENDRO_97*grad2_0_1_gt3[pp] + DENDRO_98*grad2_2_2_gt3[pp] + DENDRO_99*grad2_1_1_gt3[pp]; const double DENDRO_399 = 6.0*DENDRO_233; const double DENDRO_400 = 4*DENDRO_101*DENDRO_249*DENDRO_33; const double DENDRO_401 = 4*DENDRO_101*DENDRO_31*(DENDRO_131 + DENDRO_52); const double DENDRO_402 = -DENDRO_107; const double DENDRO_403 = 4*DENDRO_101*DENDRO_33*(DENDRO_104 + DENDRO_109 + DENDRO_402); const double DENDRO_404 = 4*DENDRO_101*DENDRO_31*(DENDRO_175 + DENDRO_326 + DENDRO_402); const double DENDRO_405 = 4*DENDRO_101*DENDRO_26*DENDRO_277; const double DENDRO_406 = DENDRO_221*DENDRO_233; const double DENDRO_407 = DENDRO_218*DENDRO_241; const double DENDRO_408 = DENDRO_121*DENDRO_233; const double DENDRO_409 = DENDRO_117*DENDRO_241; const double DENDRO_410 = DENDRO_216*DENDRO_258; const double DENDRO_411 = DENDRO_277*DENDRO_53; const double DENDRO_412 = 0.25*DENDRO_406; const double DENDRO_413 = 0.25*DENDRO_408; const double DENDRO_414 = DENDRO_199*DENDRO_335; const double DENDRO_415 = 0.5*DENDRO_103; const double DENDRO_416 = 0.5*DENDRO_108; const double DENDRO_417 = -0.5*DENDRO_106 + DENDRO_415 + DENDRO_416; const double DENDRO_418 = DENDRO_130*DENDRO_267; const double DENDRO_419 = DENDRO_126*DENDRO_277; const double DENDRO_420 = DENDRO_119*DENDRO_258; const double DENDRO_421 = DENDRO_126*DENDRO_269; const double DENDRO_422 = 0.25*DENDRO_421; const double DENDRO_423 = DENDRO_199*DENDRO_230; const double DENDRO_424 = DENDRO_119*DENDRO_249; const double DENDRO_425 = 0.25*DENDRO_424; const double DENDRO_426 = DENDRO_124 - 0.5*DENDRO_137; const double DENDRO_427 = DENDRO_267*DENDRO_426; const double DENDRO_428 = -DENDRO_427; const double DENDRO_429 = DENDRO_111*DENDRO_277; const double DENDRO_430 = 0.5*DENDRO_235 + DENDRO_325; const double DENDRO_431 = DENDRO_249*DENDRO_430; const double DENDRO_432 = 2*DENDRO_230*DENDRO_258; const double DENDRO_433 = -DENDRO_269*DENDRO_426; const double DENDRO_434 = 2*DENDRO_277*DENDRO_55; const double DENDRO_435 = DENDRO_199*DENDRO_430; const double DENDRO_436 = DENDRO_111*DENDRO_258; const double DENDRO_437 = -DENDRO_285; const double DENDRO_438 = -DENDRO_136; const double DENDRO_439 = DENDRO_138 + DENDRO_438; const double DENDRO_440 = -0.5*gt1[pp]*gt5[pp] + 0.5*gt2[pp]*gt4[pp]; const double DENDRO_441 = DENDRO_262*DENDRO_48; const double DENDRO_442 = DENDRO_269*DENDRO_53; const double DENDRO_443 = DENDRO_179*DENDRO_269; const double DENDRO_444 = DENDRO_130*DENDRO_262; const double DENDRO_445 = DENDRO_110*DENDRO_211; const double DENDRO_446 = DENDRO_262*DENDRO_44; const double DENDRO_447 = DENDRO_214*DENDRO_48; const double DENDRO_448 = DENDRO_269*DENDRO_44; const 
double DENDRO_449 = DENDRO_214*DENDRO_53; const double DENDRO_450 = 0.25*DENDRO_446; const double DENDRO_451 = 0.25*DENDRO_448; const double DENDRO_452 = DENDRO_160*DENDRO_199; const double DENDRO_453 = DENDRO_126*DENDRO_167; const double DENDRO_454 = DENDRO_119*DENDRO_211; const double DENDRO_455 = DENDRO_167*DENDRO_205; const double DENDRO_456 = DENDRO_199*DENDRO_205; const double DENDRO_457 = grad2_0_2_alpha[pp]; const double DENDRO_458 = DENDRO_19*DENDRO_36*(DENDRO_113 + DENDRO_37*DENDRO_42*gt2[pp]); const double DENDRO_459 = 2.0*DENDRO_59; const double DENDRO_460 = DENDRO_164*DENDRO_19; const double DENDRO_461 = DENDRO_165*DENDRO_19; const double DENDRO_462 = DENDRO_166*DENDRO_19; const double DENDRO_463 = -DENDRO_39; const double DENDRO_464 = DENDRO_80*gt2[pp]; const double DENDRO_465 = DENDRO_37*(DENDRO_317*DENDRO_464 + DENDRO_463); const double DENDRO_466 = 2.0*DENDRO_73; const double DENDRO_467 = DENDRO_144*DENDRO_19; const double DENDRO_468 = DENDRO_145*DENDRO_19; const double DENDRO_469 = DENDRO_143*DENDRO_19; const double DENDRO_470 = -DENDRO_38; const double DENDRO_471 = DENDRO_37*(DENDRO_464*DENDRO_85 + DENDRO_470); const double DENDRO_472 = -4*DENDRO_457 + 2.0*DENDRO_458 + DENDRO_459*(-DENDRO_460 - DENDRO_461 + DENDRO_462 + DENDRO_465) + DENDRO_466*(-DENDRO_467 - DENDRO_468 + DENDRO_469 + DENDRO_471); const double DENDRO_473 = -DENDRO_292; const double DENDRO_474 = 0.5*DENDRO_38*DENDRO_80; const double DENDRO_475 = 0.5*DENDRO_40*DENDRO_80; const double DENDRO_476 = 0.5*DENDRO_39*DENDRO_80; const double DENDRO_477 = DENDRO_106*DENDRO_380 + DENDRO_161*DENDRO_381 - DENDRO_207*(DENDRO_473 + DENDRO_474*(DENDRO_111*DENDRO_315 + DENDRO_182) + DENDRO_475*(DENDRO_110*DENDRO_315 + DENDRO_112 + DENDRO_48*DENDRO_81) + DENDRO_476*(DENDRO_111*DENDRO_81 + DENDRO_146)) + DENDRO_382*DENDRO_46 + DENDRO_383*gt2[pp]; const double DENDRO_478 = 2.0*gt0[pp]; const double DENDRO_479 = DENDRO_319*DENDRO_478; const double DENDRO_480 = 2.0*gt1[pp]; const double DENDRO_481 = DENDRO_321*DENDRO_480; const double DENDRO_482 = 2.0*gt2[pp]; const double DENDRO_483 = DENDRO_482*DENDRO_88; const double DENDRO_484 = DENDRO_322*DENDRO_482; const double DENDRO_485 = 2.0*gt4[pp]; const double DENDRO_486 = DENDRO_485*DENDRO_90; const double DENDRO_487 = 2.0*gt5[pp]; const double DENDRO_488 = DENDRO_487*DENDRO_92; const double DENDRO_489 = DENDRO_39*DENDRO_93; const double DENDRO_490 = -DENDRO_38*DENDRO_489; const double DENDRO_491 = -DENDRO_95*grad2_1_2_gt2[pp]; const double DENDRO_492 = DENDRO_96*grad2_0_2_gt2[pp]; const double DENDRO_493 = -DENDRO_97*grad2_0_1_gt2[pp]; const double DENDRO_494 = DENDRO_98*grad2_2_2_gt2[pp]; const double DENDRO_495 = DENDRO_99*grad2_1_1_gt2[pp]; const double DENDRO_496 = DENDRO_100*grad2_0_0_gt2[pp]; const double DENDRO_497 = DENDRO_160*DENDRO_262; const double DENDRO_498 = DENDRO_119*DENDRO_269; const double DENDRO_499 = 0.25*DENDRO_498; const double DENDRO_500 = DENDRO_214*DENDRO_50; const double DENDRO_501 = DENDRO_101*DENDRO_23; const double DENDRO_502 = DENDRO_119*DENDRO_121; const double DENDRO_503 = DENDRO_113*DENDRO_117; const double DENDRO_504 = DENDRO_221*DENDRO_53 + DENDRO_503; const double DENDRO_505 = DENDRO_110*DENDRO_199; const double DENDRO_506 = DENDRO_249*DENDRO_48 + DENDRO_505; const double DENDRO_507 = DENDRO_274*DENDRO_54; const double DENDRO_508 = DENDRO_160*DENDRO_269 + DENDRO_507; const double DENDRO_509 = 0.25*DENDRO_44; const double DENDRO_510 = DENDRO_267*DENDRO_509; const double DENDRO_511 = DENDRO_205*DENDRO_269; const double DENDRO_512 = DENDRO_444 + 
DENDRO_511; const double DENDRO_513 = DENDRO_126*DENDRO_221; const double DENDRO_514 = 0.25*DENDRO_513; const double DENDRO_515 = DENDRO_136*DENDRO_231; const double DENDRO_516 = DENDRO_121*DENDRO_335 + DENDRO_515; const double DENDRO_517 = -DENDRO_221*DENDRO_426; const double DENDRO_518 = DENDRO_121*DENDRO_430; const double DENDRO_519 = DENDRO_397 + DENDRO_518; const double DENDRO_520 = 0.5*DENDRO_44; const double DENDRO_521 = DENDRO_274*DENDRO_520; const double DENDRO_522 = DENDRO_179*DENDRO_262 + DENDRO_521; const double DENDRO_523 = DENDRO_205*DENDRO_262; const double DENDRO_524 = 0.5*DENDRO_106; const double DENDRO_525 = -0.5*DENDRO_103 + DENDRO_416 + DENDRO_524; const double DENDRO_526 = DENDRO_255*DENDRO_525 + DENDRO_347; const double DENDRO_527 = 0.25*DENDRO_358 + DENDRO_507; const double DENDRO_528 = 1.0*DENDRO_101*DENDRO_26; const double DENDRO_529 = DENDRO_199*DENDRO_216; const double DENDRO_530 = DENDRO_126*DENDRO_249; const double DENDRO_531 = -0.5*DENDRO_108 + DENDRO_415 + DENDRO_524; const double DENDRO_532 = DENDRO_214*DENDRO_531 + DENDRO_444; const double DENDRO_533 = DENDRO_167*DENDRO_179; const double DENDRO_534 = DENDRO_198*DENDRO_255; const double DENDRO_535 = -DENDRO_534; const double DENDRO_536 = 0.25*DENDRO_110*DENDRO_25 - 0.25*DENDRO_111*DENDRO_26 + 0.25*DENDRO_23*DENDRO_48; const double DENDRO_537 = DENDRO_111*DENDRO_536; const double DENDRO_538 = DENDRO_119*DENDRO_536 + DENDRO_231*DENDRO_54; const double DENDRO_539 = DENDRO_113*DENDRO_130 + DENDRO_531*DENDRO_57; const double DENDRO_540 = DENDRO_249*DENDRO_365; const double DENDRO_541 = -DENDRO_540; const double DENDRO_542 = DENDRO_199*DENDRO_354; const double DENDRO_543 = DENDRO_255*DENDRO_531 + DENDRO_542; const double DENDRO_544 = DENDRO_211*DENDRO_226; const double DENDRO_545 = DENDRO_205*DENDRO_249; const double DENDRO_546 = 0.25*DENDRO_453; const double DENDRO_547 = DENDRO_132*DENDRO_221; const double DENDRO_548 = -0.5*DENDRO_194 + DENDRO_238*DENDRO_57; const double DENDRO_549 = DENDRO_126*DENDRO_536 + DENDRO_226*DENDRO_57; const double DENDRO_550 = 0.5*DENDRO_224; const double DENDRO_551 = DENDRO_211*DENDRO_550; const double DENDRO_552 = -DENDRO_167*DENDRO_365; const double DENDRO_553 = 0.5*DENDRO_332; const double DENDRO_554 = DENDRO_140*DENDRO_231; const double DENDRO_555 = DENDRO_553 - DENDRO_554; const double DENDRO_556 = DENDRO_113*DENDRO_335 + DENDRO_231*DENDRO_525; const double DENDRO_557 = 0.25*DENDRO_530; const double DENDRO_558 = 0.5*DENDRO_351; const double DENDRO_559 = DENDRO_101*DENDRO_25; const double DENDRO_560 = DENDRO_113*DENDRO_218; const double DENDRO_561 = DENDRO_121*DENDRO_216 + DENDRO_560; const double DENDRO_562 = DENDRO_559*(DENDRO_513 + DENDRO_561); const double DENDRO_563 = DENDRO_267*DENDRO_48; const double DENDRO_564 = DENDRO_110*DENDRO_269 + DENDRO_563; const double DENDRO_565 = DENDRO_179*DENDRO_249; const double DENDRO_566 = DENDRO_452 + DENDRO_544; const double DENDRO_567 = DENDRO_347 + DENDRO_541; const double DENDRO_568 = DENDRO_396 + DENDRO_517; const double DENDRO_569 = -DENDRO_323*(DENDRO_518 + DENDRO_568); const double DENDRO_570 = DENDRO_160*DENDRO_167 + DENDRO_551; const double DENDRO_571 = DENDRO_130*DENDRO_221; const double DENDRO_572 = DENDRO_237*DENDRO_57; const double DENDRO_573 = 0.25*DENDRO_502; const double DENDRO_574 = DENDRO_134*(DENDRO_571 + DENDRO_572 + DENDRO_573); const double DENDRO_575 = DENDRO_267*DENDRO_53; const double DENDRO_576 = -DENDRO_102*(DENDRO_363 + DENDRO_556); const double DENDRO_577 = -DENDRO_129*(DENDRO_537 + DENDRO_549); const double 
DENDRO_578 = -DENDRO_269*DENDRO_365; const double DENDRO_579 = DENDRO_214*DENDRO_525 + DENDRO_510; const double DENDRO_580 = DENDRO_121*DENDRO_336; const double DENDRO_581 = -DENDRO_580; const double DENDRO_582 = DENDRO_134*(DENDRO_196 + DENDRO_548); const double DENDRO_583 = -DENDRO_129*(-DENDRO_230*DENDRO_57 + DENDRO_538); const double DENDRO_584 = -DENDRO_177*(DENDRO_417*DENDRO_57 + DENDRO_539); const double DENDRO_585 = grad2_1_2_alpha[pp]; const double DENDRO_586 = DENDRO_19*DENDRO_73; const double DENDRO_587 = 2.0*DENDRO_36; const double DENDRO_588 = DENDRO_80*gt4[pp]; const double DENDRO_589 = DENDRO_19*DENDRO_217 - DENDRO_19*DENDRO_219 + DENDRO_19*DENDRO_220; const double DENDRO_590 = DENDRO_19*DENDRO_246; const double DENDRO_591 = DENDRO_19*DENDRO_247; const double DENDRO_592 = DENDRO_19*DENDRO_248; const double DENDRO_593 = -DENDRO_40; const double DENDRO_594 = DENDRO_37*(DENDRO_317*DENDRO_588 + DENDRO_593); const double DENDRO_595 = DENDRO_459*(DENDRO_590 - DENDRO_591 - DENDRO_592 + DENDRO_594) - 4*DENDRO_585 + DENDRO_586*(-2.0*DENDRO_126*DENDRO_31 - 2.0*DENDRO_216*DENDRO_29 + 2.0*DENDRO_218*DENDRO_23 + 2.0*DENDRO_310*DENDRO_37*gt4[pp]) + DENDRO_587*(DENDRO_37*(DENDRO_389*DENDRO_588 + DENDRO_470) + DENDRO_589); const double DENDRO_596 = -DENDRO_289; const double DENDRO_597 = DENDRO_247 + DENDRO_248; const double DENDRO_598 = DENDRO_265 + DENDRO_266; const double DENDRO_599 = DENDRO_103*DENDRO_382 - DENDRO_207*(DENDRO_474*(DENDRO_218*DENDRO_315 + DENDRO_597) + DENDRO_475*(DENDRO_126*DENDRO_81 + DENDRO_216*DENDRO_315 + DENDRO_219) + DENDRO_476*(DENDRO_218*DENDRO_81 + DENDRO_598) + DENDRO_596) + DENDRO_227*DENDRO_381 + DENDRO_235*DENDRO_380 + DENDRO_383*gt4[pp]; const double DENDRO_600 = DENDRO_319*DENDRO_480; const double DENDRO_601 = DENDRO_391*DENDRO_482; const double DENDRO_602 = 2.0*gt3[pp]; const double DENDRO_603 = DENDRO_321*DENDRO_602; const double DENDRO_604 = DENDRO_392*DENDRO_485; const double DENDRO_605 = DENDRO_322*DENDRO_485; const double DENDRO_606 = DENDRO_393*DENDRO_487; const double DENDRO_607 = -DENDRO_290*DENDRO_93; const double DENDRO_608 = -DENDRO_95*grad2_1_2_gt4[pp]; const double DENDRO_609 = DENDRO_96*grad2_0_2_gt4[pp]; const double DENDRO_610 = -DENDRO_97*grad2_0_1_gt4[pp]; const double DENDRO_611 = DENDRO_98*grad2_2_2_gt4[pp]; const double DENDRO_612 = DENDRO_99*grad2_1_1_gt4[pp]; const double DENDRO_613 = DENDRO_100*grad2_0_0_gt4[pp]; const double DENDRO_614 = DENDRO_221*DENDRO_335; const double DENDRO_615 = DENDRO_238*DENDRO_241; const double DENDRO_616 = DENDRO_233*DENDRO_536; const double DENDRO_617 = DENDRO_167*DENDRO_218 + DENDRO_529; const double DENDRO_618 = DENDRO_117*DENDRO_262 + DENDRO_575; const double DENDRO_619 = DENDRO_221*DENDRO_430; const double DENDRO_620 = 0.5*DENDRO_233; const double DENDRO_621 = DENDRO_231*DENDRO_620; const double DENDRO_622 = DENDRO_221*DENDRO_324 + DENDRO_621; const double DENDRO_623 = 0.25*DENDRO_331 + DENDRO_515; const double DENDRO_624 = DENDRO_241*DENDRO_531; const double DENDRO_625 = DENDRO_249*DENDRO_324; const double DENDRO_626 = DENDRO_255*DENDRO_417 + DENDRO_348; const double DENDRO_627 = DENDRO_104 + DENDRO_175 + DENDRO_402; const double DENDRO_628 = DENDRO_262*DENDRO_627; const double DENDRO_629 = 1.0*DENDRO_101*DENDRO_31; const double DENDRO_630 = DENDRO_111*DENDRO_167; const double DENDRO_631 = DENDRO_132*DENDRO_262; const double DENDRO_632 = DENDRO_230*DENDRO_255; const double DENDRO_633 = -DENDRO_632; const double DENDRO_634 = DENDRO_167*DENDRO_336; const double DENDRO_635 = -DENDRO_634; const double 
DENDRO_636 = DENDRO_197*DENDRO_258; const double DENDRO_637 = DENDRO_167*DENDRO_430; const double DENDRO_638 = DENDRO_249*DENDRO_627; const double DENDRO_639 = -0.25*DENDRO_126*DENDRO_31 - 0.25*DENDRO_216*DENDRO_29 + 0.25*DENDRO_218*DENDRO_23; const double DENDRO_640 = DENDRO_126*DENDRO_639; const double DENDRO_641 = DENDRO_119*DENDRO_639 + DENDRO_136*DENDRO_274; const double DENDRO_642 = DENDRO_277*DENDRO_531; const double DENDRO_643 = DENDRO_124*DENDRO_267 + DENDRO_642; const double DENDRO_644 = DENDRO_258*DENDRO_550; const double DENDRO_645 = -DENDRO_249*DENDRO_336; const double DENDRO_646 = -DENDRO_262*DENDRO_426; const double DENDRO_647 = DENDRO_277*DENDRO_50; const double DENDRO_648 = DENDRO_132*DENDRO_267 + DENDRO_647; const double DENDRO_649 = DENDRO_197*DENDRO_277; const double DENDRO_650 = DENDRO_267*DENDRO_627 + DENDRO_649; const double DENDRO_651 = DENDRO_205*DENDRO_267 + DENDRO_274*DENDRO_55; const double DENDRO_652 = DENDRO_160*DENDRO_267 + DENDRO_274*DENDRO_417; const double DENDRO_653 = 1.0*DENDRO_410; const double DENDRO_654 = 0.25*DENDRO_630; const double DENDRO_655 = 0.5*DENDRO_349; const double DENDRO_656 = 1.0*DENDRO_101*DENDRO_29; const double DENDRO_657 = -DENDRO_656*(DENDRO_333 + DENDRO_561); const double DENDRO_658 = DENDRO_111*DENDRO_262; const double DENDRO_659 = DENDRO_167*DENDRO_324; const double DENDRO_660 = DENDRO_414 + DENDRO_636; const double DENDRO_661 = DENDRO_348 + DENDRO_635; const double DENDRO_662 = -DENDRO_629*(DENDRO_195 + DENDRO_502 + DENDRO_503); const double DENDRO_663 = -DENDRO_102*(DENDRO_231*DENDRO_237 + DENDRO_337 + DENDRO_614); const double DENDRO_664 = DENDRO_249*DENDRO_335 + DENDRO_644; const double DENDRO_665 = DENDRO_396 + DENDRO_397; const double DENDRO_666 = DENDRO_124*DENDRO_262; const double DENDRO_667 = DENDRO_277*DENDRO_49; const double DENDRO_668 = DENDRO_443 + DENDRO_631; const double DENDRO_669 = -DENDRO_129*(DENDRO_581 + DENDRO_623); const double DENDRO_670 = DENDRO_241*DENDRO_417 + DENDRO_616; const double DENDRO_671 = DENDRO_619 + DENDRO_621; const double DENDRO_672 = DENDRO_198*DENDRO_277; const double DENDRO_673 = 0.5*DENDRO_419; const double DENDRO_674 = grad2_0_1_alpha[pp]; const double DENDRO_675 = DENDRO_19*DENDRO_59; const double DENDRO_676 = DENDRO_80*gt1[pp]; const double DENDRO_677 = DENDRO_116*DENDRO_19 - DENDRO_118*DENDRO_19 + DENDRO_120*DENDRO_19; const double DENDRO_678 = DENDRO_150*DENDRO_19; const double DENDRO_679 = DENDRO_151*DENDRO_19; const double DENDRO_680 = DENDRO_152*DENDRO_19; const double DENDRO_681 = DENDRO_37*(DENDRO_593 + DENDRO_676*DENDRO_85); const double DENDRO_682 = DENDRO_466*(DENDRO_678 - DENDRO_679 - DENDRO_680 + DENDRO_681) + DENDRO_587*(DENDRO_37*(DENDRO_389*DENDRO_676 + DENDRO_463) + DENDRO_677) - 4*DENDRO_674 + DENDRO_675*(2.0*DENDRO_117*DENDRO_25 - 2.0*DENDRO_119*DENDRO_33 - 2.0*DENDRO_29*DENDRO_53 + 2.0*DENDRO_37*DENDRO_64*gt1[pp]); const double DENDRO_683 = -DENDRO_295; const double DENDRO_684 = DENDRO_108*DENDRO_381 + DENDRO_137*DENDRO_380 - DENDRO_207*(DENDRO_474*(DENDRO_117*DENDRO_315 + DENDRO_172) + DENDRO_475*(DENDRO_118 + DENDRO_119*DENDRO_315 + DENDRO_53*DENDRO_81) + DENDRO_476*(DENDRO_117*DENDRO_81 + DENDRO_153) + DENDRO_683) + DENDRO_382*DENDRO_51 + DENDRO_383*gt1[pp]; const double DENDRO_685 = DENDRO_391*DENDRO_478; const double DENDRO_686 = DENDRO_480*DENDRO_88; const double DENDRO_687 = DENDRO_392*DENDRO_480; const double DENDRO_688 = DENDRO_393*DENDRO_482; const double DENDRO_689 = DENDRO_602*DENDRO_90; const double DENDRO_690 = DENDRO_485*DENDRO_92; const double 
DENDRO_691 = -DENDRO_40*DENDRO_489; const double DENDRO_692 = -DENDRO_95*grad2_1_2_gt1[pp]; const double DENDRO_693 = DENDRO_96*grad2_0_2_gt1[pp]; const double DENDRO_694 = -DENDRO_97*grad2_0_1_gt1[pp]; const double DENDRO_695 = DENDRO_98*grad2_2_2_gt1[pp]; const double DENDRO_696 = DENDRO_99*grad2_1_1_gt1[pp]; const double DENDRO_697 = DENDRO_100*grad2_0_0_gt1[pp]; const double DENDRO_698 = DENDRO_121*DENDRO_130; const double DENDRO_699 = -DENDRO_177*(1.0*DENDRO_122 + DENDRO_698); const double DENDRO_700 = -DENDRO_102*(DENDRO_113*DENDRO_237 + DENDRO_514); const double DENDRO_701 = 0.5*DENDRO_408; const double DENDRO_702 = DENDRO_140*DENDRO_241; const double DENDRO_703 = -DENDRO_702; const double DENDRO_704 = DENDRO_123*(DENDRO_568 + DENDRO_616); const double DENDRO_705 = DENDRO_125 + DENDRO_572; const double DENDRO_706 = -DENDRO_129*(DENDRO_571 + DENDRO_705); const double DENDRO_707 = -DENDRO_121*DENDRO_426; const double DENDRO_708 = DENDRO_57*DENDRO_620; const double DENDRO_709 = DENDRO_121*DENDRO_124 + DENDRO_708; const double DENDRO_710 = DENDRO_134*(DENDRO_707 + DENDRO_709); const double DENDRO_711 = DENDRO_241*DENDRO_525; const double DENDRO_712 = DENDRO_396 + DENDRO_616; const double DENDRO_713 = 0.25*DENDRO_127; const double DENDRO_714 = -DENDRO_129*(DENDRO_705 + DENDRO_713); const double DENDRO_715 = 1.0*DENDRO_101*DENDRO_33; const double DENDRO_716 = DENDRO_124*DENDRO_269; const double DENDRO_717 = DENDRO_277*DENDRO_54; const double DENDRO_718 = DENDRO_214*DENDRO_417 + DENDRO_443; const double DENDRO_719 = DENDRO_214*DENDRO_55; const double DENDRO_720 = DENDRO_269*DENDRO_627; const double DENDRO_721 = DENDRO_258*DENDRO_525; const double DENDRO_722 = DENDRO_199*DENDRO_324 + DENDRO_721; const double DENDRO_723 = 0.25*DENDRO_117*DENDRO_25 - 0.25*DENDRO_119*DENDRO_33 - 0.25*DENDRO_29*DENDRO_53; const double DENDRO_724 = DENDRO_119*DENDRO_723; const double DENDRO_725 = DENDRO_126*DENDRO_723 + DENDRO_211*DENDRO_237; const double DENDRO_726 = DENDRO_277*DENDRO_520; const double DENDRO_727 = DENDRO_132*DENDRO_269; const double DENDRO_728 = DENDRO_258*DENDRO_49; const double DENDRO_729 = DENDRO_199*DENDRO_627 + DENDRO_728; const double DENDRO_730 = 0.5*DENDRO_423; const double DENDRO_731 = DENDRO_198*DENDRO_258; const double DENDRO_732 = -DENDRO_730 - DENDRO_731; const double DENDRO_733 = 0.5*DENDRO_200; const double DENDRO_734 = DENDRO_211*DENDRO_230; const double DENDRO_735 = -DENDRO_733 - DENDRO_734; const double DENDRO_736 = DENDRO_179*DENDRO_199 + DENDRO_211*DENDRO_417; const double DENDRO_737 = DENDRO_418 + DENDRO_667; const double DENDRO_738 = DENDRO_130*DENDRO_269 + DENDRO_726; const double DENDRO_739 = DENDRO_19*(-DENDRO_23*(DENDRO_682 + alpha[pp]*(-DENDRO_102*(DENDRO_541 + DENDRO_661) - DENDRO_102*(DENDRO_267*DENDRO_49 + DENDRO_628) + DENDRO_115*(DENDRO_241*DENDRO_53 + DENDRO_395) + DENDRO_123*(DENDRO_425 + DENDRO_732) + DENDRO_123*(DENDRO_517 + DENDRO_670) + DENDRO_123*(DENDRO_665 + DENDRO_711) + DENDRO_123*(DENDRO_666 + DENDRO_737) + DENDRO_123*(DENDRO_720 + DENDRO_737) + DENDRO_123*(DENDRO_557 + DENDRO_636 + DENDRO_659) - DENDRO_129*(DENDRO_510 + DENDRO_668) - DENDRO_129*(DENDRO_510 + DENDRO_718) - DENDRO_129*(DENDRO_545 + DENDRO_735) - DENDRO_129*(DENDRO_547 + DENDRO_572 + DENDRO_713) + DENDRO_134*(DENDRO_724 + DENDRO_729) + DENDRO_134*(DENDRO_727 + DENDRO_738) + DENDRO_134*(DENDRO_136*DENDRO_214 + DENDRO_738) + DENDRO_134*(DENDRO_258*DENDRO_50 + DENDRO_725) + DENDRO_134*(DENDRO_241*DENDRO_55 + DENDRO_707 + DENDRO_708) - DENDRO_177*(0.5*DENDRO_448 + DENDRO_719) - 
DENDRO_177*(DENDRO_456 + DENDRO_736) - DENDRO_177*(DENDRO_135 + DENDRO_136*DENDRO_57 + DENDRO_698) - DENDRO_323*(1.0*DENDRO_411 + DENDRO_716) - DENDRO_323*(0.5*DENDRO_420 + DENDRO_722) - DENDRO_323*(DENDRO_136*DENDRO_241 + DENDRO_413 + DENDRO_703) - DENDRO_656*(DENDRO_195 + DENDRO_504) - DENDRO_656*(DENDRO_506 + DENDRO_630) + DENDRO_684 + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 - DENDRO_715*(DENDRO_333 + DENDRO_513 + DENDRO_560))) - DENDRO_23*(DENDRO_682 + alpha[pp]*(-DENDRO_102*(DENDRO_567 + DENDRO_635) + DENDRO_115*(DENDRO_117*DENDRO_214 + DENDRO_442) + DENDRO_123*(DENDRO_637 + DENDRO_732) + DENDRO_123*(DENDRO_711 + DENDRO_712) + DENDRO_123*(DENDRO_646 + DENDRO_667 + DENDRO_720) - DENDRO_129*(DENDRO_202 + DENDRO_735) - DENDRO_129*(DENDRO_444 + DENDRO_718) - DENDRO_129*(DENDRO_579 + DENDRO_631) - DENDRO_129*(DENDRO_544 + DENDRO_565 + DENDRO_654) + DENDRO_134*(DENDRO_724 + DENDRO_725) + DENDRO_134*(DENDRO_211*DENDRO_238 + DENDRO_729) + DENDRO_134*(DENDRO_241*DENDRO_54 + DENDRO_709) + DENDRO_134*(-DENDRO_140*DENDRO_214 + DENDRO_726 + DENDRO_727) - DENDRO_177*(0.5*DENDRO_454 + DENDRO_736) - DENDRO_177*(DENDRO_214*DENDRO_54 + DENDRO_451 + DENDRO_719) - DENDRO_323*(DENDRO_435 + DENDRO_722) - DENDRO_323*(DENDRO_701 + DENDRO_703) - DENDRO_323*(DENDRO_433 + DENDRO_716 + DENDRO_717) + DENDRO_559*(DENDRO_421 + DENDRO_618) + DENDRO_559*(DENDRO_530 + DENDRO_617) + DENDRO_684 + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 + DENDRO_699 + DENDRO_700 + DENDRO_704 + DENDRO_706 + DENDRO_710 + DENDRO_714 - DENDRO_715*(DENDRO_360 + DENDRO_563 + DENDRO_658))) - DENDRO_25*(DENDRO_595 + alpha[pp]*(-DENDRO_102*(1.0*DENDRO_329 + DENDRO_614) - DENDRO_102*(0.5*DENDRO_359 + DENDRO_652) - DENDRO_102*(DENDRO_226*DENDRO_255 + DENDRO_355 + DENDRO_633) + DENDRO_123*(DENDRO_619 + DENDRO_622) + DENDRO_123*(DENDRO_640 + DENDRO_641) + DENDRO_123*(-DENDRO_140*DENDRO_274 + DENDRO_650) + DENDRO_123*(DENDRO_226*DENDRO_241 + DENDRO_622) + DENDRO_123*(DENDRO_238*DENDRO_255 + DENDRO_644 + DENDRO_645) - DENDRO_129*(DENDRO_330 + DENDRO_516) - DENDRO_129*(DENDRO_330 + DENDRO_623) - DENDRO_129*(DENDRO_347 + DENDRO_626) - DENDRO_129*(DENDRO_361 + DENDRO_651) - DENDRO_129*(DENDRO_508 + DENDRO_628) - DENDRO_129*(DENDRO_543 + DENDRO_635) + DENDRO_134*(DENDRO_519 + DENDRO_616) + DENDRO_134*(DENDRO_646 + DENDRO_648) + DENDRO_134*(DENDRO_397 + DENDRO_616 + DENDRO_624) + DENDRO_134*(DENDRO_636 + DENDRO_637 + DENDRO_638) - DENDRO_177*(DENDRO_512 + DENDRO_631) - DENDRO_177*(DENDRO_113*DENDRO_136 + DENDRO_573) - DENDRO_323*(0.5*DENDRO_406 + DENDRO_615) - DENDRO_323*(DENDRO_428 + DENDRO_643) - DENDRO_323*(DENDRO_226*DENDRO_258 + DENDRO_431 + DENDRO_625) + DENDRO_328*(DENDRO_218*DENDRO_255 + DENDRO_339) + DENDRO_501*(DENDRO_424 + DENDRO_617) + DENDRO_501*(DENDRO_498 + DENDRO_618) + DENDRO_599 + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 - DENDRO_629*(DENDRO_201 + DENDRO_505 + DENDRO_630))) - DENDRO_25*(DENDRO_595 + alpha[pp]*(-DENDRO_102*(DENDRO_367 + DENDRO_652) - DENDRO_102*(DENDRO_633 + DENDRO_655) + DENDRO_123*(DENDRO_640 + DENDRO_650) + DENDRO_123*(DENDRO_641 - DENDRO_672) + DENDRO_123*(DENDRO_645 + DENDRO_664) + DENDRO_123*(-DENDRO_230*DENDRO_241 + DENDRO_671) + 
DENDRO_123*(DENDRO_237*DENDRO_255 + DENDRO_664) - DENDRO_129*(DENDRO_542 + DENDRO_626) - DENDRO_129*(DENDRO_542 + DENDRO_661) - DENDRO_129*(DENDRO_578 + DENDRO_651) + DENDRO_134*(DENDRO_422 + DENDRO_648) + DENDRO_134*(DENDRO_518 + DENDRO_670) + DENDRO_134*(DENDRO_624 + DENDRO_665) + DENDRO_134*(DENDRO_638 + DENDRO_660) + DENDRO_134*(DENDRO_659 + DENDRO_660) + DENDRO_134*(DENDRO_499 + DENDRO_666 + DENDRO_667) - DENDRO_177*(DENDRO_511 + DENDRO_668) - DENDRO_177*(DENDRO_197*DENDRO_199 + DENDRO_654) - DENDRO_323*(DENDRO_625 + DENDRO_653) - DENDRO_323*(DENDRO_643 + DENDRO_673) - DENDRO_323*(DENDRO_237*DENDRO_241 + DENDRO_412 + DENDRO_615) + DENDRO_328*(DENDRO_216*DENDRO_241 + DENDRO_394) + DENDRO_599 + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 - DENDRO_656*(DENDRO_564 + DENDRO_658) + DENDRO_657 + DENDRO_662 + DENDRO_663 + DENDRO_669)) + DENDRO_26*(DENDRO_307*(DENDRO_277 + DENDRO_310*DENDRO_386) + 4*DENDRO_36*(DENDRO_37*(DENDRO_387 + DENDRO_388*DENDRO_389*DENDRO_80) + DENDRO_390) - 4*DENDRO_384 + DENDRO_385*(DENDRO_258 + DENDRO_65*gt3[pp]) + alpha[pp]*(-DENDRO_101*DENDRO_241*DENDRO_26*DENDRO_399 + DENDRO_115*(DENDRO_408 + DENDRO_409) + DENDRO_115*(DENDRO_117*DENDRO_269 + DENDRO_411) + DENDRO_115*(DENDRO_199*DENDRO_218 + DENDRO_420) + DENDRO_117*DENDRO_382 + DENDRO_123*(1.0*DENDRO_407 + DENDRO_412) + DENDRO_123*(DENDRO_428 + DENDRO_429) + DENDRO_123*(DENDRO_431 - DENDRO_432) - DENDRO_129*(-1.0*DENDRO_423 + DENDRO_425) - DENDRO_129*(DENDRO_249*DENDRO_417 + DENDRO_414) - DENDRO_129*(DENDRO_267*DENDRO_55 + DENDRO_422) - DENDRO_129*(DENDRO_269*DENDRO_417 + DENDRO_418) + DENDRO_134*(1.0*DENDRO_409 + DENDRO_413) + DENDRO_134*(DENDRO_433 + DENDRO_434) + DENDRO_134*(DENDRO_435 + DENDRO_436) - DENDRO_199*DENDRO_404 - DENDRO_207*(DENDRO_372*(DENDRO_233*DENDRO_378 + DENDRO_257 + DENDRO_29*DENDRO_439) + DENDRO_377*(DENDRO_234 + DENDRO_238*DENDRO_315 + DENDRO_439*DENDRO_81) + DENDRO_379*(DENDRO_233*DENDRO_440 + DENDRO_276 + DENDRO_31*DENDRO_439) + DENDRO_437) + DENDRO_218*DENDRO_381 + DENDRO_233*DENDRO_380 - DENDRO_267*DENDRO_403 - DENDRO_269*DENDRO_401 + DENDRO_328*(DENDRO_406 + DENDRO_407) + DENDRO_328*(DENDRO_117*DENDRO_267 + DENDRO_419) + DENDRO_328*(DENDRO_218*DENDRO_249 + DENDRO_410) + DENDRO_383*gt3[pp] + DENDRO_398 - DENDRO_400*(DENDRO_228 - DENDRO_335) - DENDRO_405*(DENDRO_136 + DENDRO_138))) + DENDRO_29*(DENDRO_472 + alpha[pp]*(-DENDRO_102*(1.0*DENDRO_353 + DENDRO_497) - DENDRO_102*(DENDRO_231*DENDRO_417 + DENDRO_556) - DENDRO_102*(DENDRO_197*DENDRO_255 + DENDRO_356 + DENDRO_535) + DENDRO_123*(DENDRO_334 + DENDRO_555) + DENDRO_123*(DENDRO_348 + DENDRO_526) + DENDRO_123*(DENDRO_357 + DENDRO_508) + DENDRO_123*(DENDRO_357 + DENDRO_527) + DENDRO_123*(DENDRO_514 + DENDRO_516) + DENDRO_123*(DENDRO_541 + DENDRO_543) - DENDRO_128*(DENDRO_255*DENDRO_48 + DENDRO_341) - DENDRO_129*(DENDRO_522 + DENDRO_523) - DENDRO_129*(DENDRO_537 + DENDRO_538) - DENDRO_129*(DENDRO_197*DENDRO_214 + DENDRO_522) - DENDRO_129*(DENDRO_231*DENDRO_55 + DENDRO_549) - DENDRO_129*(DENDRO_255*DENDRO_50 + DENDRO_551 + DENDRO_552) + DENDRO_134*(DENDRO_510 + DENDRO_512) + DENDRO_134*(DENDRO_510 + DENDRO_532) + DENDRO_134*(DENDRO_547 + DENDRO_548) + DENDRO_134*(DENDRO_544 + DENDRO_545 + DENDRO_546) - DENDRO_177*(DENDRO_133 + DENDRO_539) - DENDRO_177*(0.5*DENDRO_446 + DENDRO_500) - DENDRO_177*(DENDRO_197*DENDRO_211 + DENDRO_455 + DENDRO_533) - DENDRO_323*(DENDRO_517 + DENDRO_519) - DENDRO_323*(DENDRO_267*DENDRO_54 + 
DENDRO_499) + DENDRO_477 + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 + DENDRO_501*(DENDRO_201 + DENDRO_506) + DENDRO_501*(DENDRO_502 + DENDRO_504) - DENDRO_528*(DENDRO_424 + DENDRO_529 + DENDRO_530))) + DENDRO_29*(DENDRO_472 + alpha[pp]*(-DENDRO_102*(DENDRO_535 + DENDRO_558) - DENDRO_102*(DENDRO_274*DENDRO_49 + DENDRO_369 + DENDRO_497) + DENDRO_123*(DENDRO_526 + DENDRO_542) + DENDRO_123*(DENDRO_527 + DENDRO_578) + DENDRO_123*(DENDRO_542 + DENDRO_567) + DENDRO_123*(DENDRO_555 + DENDRO_581) - DENDRO_128*(DENDRO_110*DENDRO_214 + DENDRO_441) - DENDRO_129*(DENDRO_552 + DENDRO_570) - DENDRO_129*(DENDRO_255*DENDRO_49 + DENDRO_570) - DENDRO_129*(-DENDRO_198*DENDRO_214 + DENDRO_521 + DENDRO_523) + DENDRO_134*(DENDRO_443 + DENDRO_532) + DENDRO_134*(DENDRO_511 + DENDRO_579) + DENDRO_134*(DENDRO_546 + DENDRO_566) + DENDRO_134*(DENDRO_565 + DENDRO_566) - DENDRO_177*(1.0*DENDRO_445 + DENDRO_533) - DENDRO_177*(DENDRO_214*DENDRO_49 + DENDRO_450 + DENDRO_500) - DENDRO_323*(DENDRO_199*DENDRO_226 + DENDRO_557) + DENDRO_477 + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 - DENDRO_528*(DENDRO_421 + DENDRO_498 + DENDRO_575) + DENDRO_559*(DENDRO_360 + DENDRO_564) + DENDRO_562 + DENDRO_569 + DENDRO_574 + DENDRO_576 + DENDRO_577 + DENDRO_582 + DENDRO_583 + DENDRO_584)) + DENDRO_31*(-4*DENDRO_35 + DENDRO_385*(DENDRO_211 + DENDRO_66) + 4*DENDRO_58 + 4*DENDRO_73*(-DENDRO_75 - DENDRO_77 + DENDRO_79 + DENDRO_86) + alpha[pp]*(-DENDRO_114*DENDRO_178*DENDRO_211*DENDRO_31 + DENDRO_115*(DENDRO_448 + DENDRO_449) + DENDRO_115*(DENDRO_199*DENDRO_48 + DENDRO_454) + DENDRO_123*(-1.0*DENDRO_194 + DENDRO_196) + DENDRO_123*(-1.0*DENDRO_200 + DENDRO_202) + DENDRO_123*(DENDRO_452 + 0.5*DENDRO_453) + DENDRO_123*(DENDRO_262*DENDRO_54 + DENDRO_443) + DENDRO_123*(DENDRO_269*DENDRO_49 + DENDRO_444) - DENDRO_128*(DENDRO_446 + DENDRO_447) - DENDRO_128*(DENDRO_167*DENDRO_48 + DENDRO_445) - DENDRO_129*(1.0*DENDRO_447 + DENDRO_450) - DENDRO_129*(-DENDRO_206*DENDRO_211 + DENDRO_455) + DENDRO_134*(1.0*DENDRO_449 + DENDRO_451) + DENDRO_134*(DENDRO_126*DENDRO_211 + DENDRO_456) + DENDRO_141 - DENDRO_142*DENDRO_441 - DENDRO_149*DENDRO_442 - DENDRO_156*(-DENDRO_124 + DENDRO_138) - DENDRO_157*DENDRO_279 - DENDRO_168*(-DENDRO_160 + DENDRO_162) - DENDRO_176*DENDRO_199 - DENDRO_207*(DENDRO_209 + DENDRO_372*(DENDRO_315*DENDRO_55 + DENDRO_71) + DENDRO_377*(DENDRO_315*DENDRO_50 + DENDRO_44*DENDRO_440 + DENDRO_56) + DENDRO_379*(DENDRO_158 + DENDRO_55*DENDRO_81)) + DENDRO_380*DENDRO_53 + DENDRO_381*DENDRO_48 + DENDRO_382*DENDRO_44 + DENDRO_383*gt0[pp])) + DENDRO_33*(-4*DENDRO_305 + 4*DENDRO_306 + DENDRO_307*(DENDRO_274 + DENDRO_311) + 4*DENDRO_59*(-DENDRO_312 + DENDRO_313 - DENDRO_314 + DENDRO_318) + alpha[pp]*(DENDRO_110*DENDRO_382 + DENDRO_123*(1.0*DENDRO_350 + DENDRO_355) + DENDRO_123*(DENDRO_367 + DENDRO_368) - DENDRO_128*(DENDRO_351 + DENDRO_352) - DENDRO_128*(DENDRO_110*DENDRO_262 + DENDRO_353) - DENDRO_129*(1.0*DENDRO_352 + DENDRO_356) - DENDRO_129*(DENDRO_363 + DENDRO_364) - DENDRO_129*(DENDRO_274*DENDRO_370 + DENDRO_369) + DENDRO_134*(DENDRO_357 + 0.5*DENDRO_358) + DENDRO_134*(DENDRO_167*DENDRO_226 + DENDRO_348) + DENDRO_134*(DENDRO_197*DENDRO_249 + DENDRO_347) + DENDRO_134*(DENDRO_267*DENDRO_50 + DENDRO_361) - DENDRO_149*DENDRO_339 - DENDRO_207*(DENDRO_371 + DENDRO_372*(DENDRO_252 + DENDRO_29*DENDRO_376 + 
DENDRO_315*DENDRO_374) + DENDRO_377*(DENDRO_224*DENDRO_378 + DENDRO_26*DENDRO_374 + DENDRO_376*DENDRO_81) + DENDRO_379*(DENDRO_271 + DENDRO_31*DENDRO_376 + DENDRO_374*DENDRO_81)) + DENDRO_216*DENDRO_380 + DENDRO_224*DENDRO_381 - DENDRO_256*DENDRO_342 - DENDRO_262*DENDRO_343 - DENDRO_267*DENDRO_344 + DENDRO_328*(DENDRO_349 + DENDRO_350) + DENDRO_328*(DENDRO_110*DENDRO_267 + DENDRO_359) + DENDRO_338 - DENDRO_340*DENDRO_341 - DENDRO_345*(DENDRO_162 + DENDRO_197) - DENDRO_346*(DENDRO_226 + DENDRO_228) + DENDRO_383*gt5[pp]))); const double DENDRO_740 = grad_1_beta0[pp]; const double DENDRO_741 = grad_1_beta2[pp]; const double DENDRO_742 = (1.0L/3.0L)*At1[pp]; const double DENDRO_743 = (2.0L/3.0L)*DENDRO_3; const double DENDRO_744 = At4[pp]*DENDRO_25; const double DENDRO_745 = -At3[pp]*DENDRO_26 + DENDRO_28 + DENDRO_744; const double DENDRO_746 = -At1[pp]*DENDRO_31 + At3[pp]*DENDRO_23 - At4[pp]*DENDRO_29; const double DENDRO_747 = -At1[pp]*DENDRO_29 + At3[pp]*DENDRO_25 - At4[pp]*DENDRO_33; const double DENDRO_748 = 6.0*DENDRO_36; const double DENDRO_749 = DENDRO_19*DENDRO_42; const double DENDRO_750 = 6.0*DENDRO_73; const double DENDRO_751 = 1.0*DENDRO_101*DENDRO_25; const double DENDRO_752 = -DENDRO_246 + DENDRO_597; const double DENDRO_753 = DENDRO_126*DENDRO_752; const double DENDRO_754 = -DENDRO_264 + DENDRO_598; const double DENDRO_755 = DENDRO_126*DENDRO_154; const double DENDRO_756 = DENDRO_53*DENDRO_754 + DENDRO_755; const double DENDRO_757 = DENDRO_48*DENDRO_754; const double DENDRO_758 = DENDRO_126*DENDRO_147; const double DENDRO_759 = DENDRO_111*DENDRO_147; const double DENDRO_760 = DENDRO_234 - DENDRO_239 + DENDRO_240; const double DENDRO_761 = DENDRO_154*DENDRO_426; const double DENDRO_762 = DENDRO_179*DENDRO_752; const double DENDRO_763 = DENDRO_226*DENDRO_72; const double DENDRO_764 = DENDRO_183*DENDRO_627; const double DENDRO_765 = DENDRO_180 + DENDRO_181; const double DENDRO_766 = DENDRO_132*DENDRO_147; const double DENDRO_767 = DENDRO_159*DENDRO_525 + DENDRO_509*DENDRO_754; const double DENDRO_768 = -DENDRO_173*DENDRO_430; const double DENDRO_769 = -0.25*DENDRO_117*DENDRO_25 + 0.25*DENDRO_119*DENDRO_33 + 0.25*DENDRO_29*DENDRO_53; const double DENDRO_770 = 0.5*DENDRO_19*DENDRO_40; const double DENDRO_771 = 0.5*DENDRO_19*DENDRO_38; const double DENDRO_772 = 0.5*DENDRO_19*DENDRO_39; const double DENDRO_773 = grad_2_beta0[pp]; const double DENDRO_774 = grad_2_beta1[pp]; const double DENDRO_775 = (1.0L/3.0L)*At2[pp]; const double DENDRO_776 = (2.0L/3.0L)*DENDRO_2; const double DENDRO_777 = At2[pp]*DENDRO_23 - At4[pp]*DENDRO_26 + At5[pp]*DENDRO_25; const double DENDRO_778 = -At2[pp]*DENDRO_31 + At4[pp]*DENDRO_23 - At5[pp]*DENDRO_29; const double DENDRO_779 = -At5[pp]*DENDRO_33 + DENDRO_30 + DENDRO_744; const double DENDRO_780 = 6.0*DENDRO_59; const double DENDRO_781 = DENDRO_110*DENDRO_154 + DENDRO_757; const double DENDRO_782 = DENDRO_173*DENDRO_354; const double DENDRO_783 = -DENDRO_782; const double DENDRO_784 = DENDRO_183*DENDRO_335; const double DENDRO_785 = DENDRO_191 + DENDRO_763; const double DENDRO_786 = DENDRO_119*DENDRO_154; const double DENDRO_787 = DENDRO_160*DENDRO_183; const double DENDRO_788 = DENDRO_550*DENDRO_72; const double DENDRO_789 = DENDRO_252 - DENDRO_253 + DENDRO_254; const double DENDRO_790 = DENDRO_147*DENDRO_365; const double DENDRO_791 = DENDRO_271 + DENDRO_272 - DENDRO_273; const double DENDRO_792 = DENDRO_154*DENDRO_365; const double DENDRO_793 = DENDRO_119*DENDRO_147; const double DENDRO_794 = DENDRO_154*DENDRO_205; const double DENDRO_795 = 
(2.0L/3.0L)*DENDRO_0; const double DENDRO_796 = 2*At4[pp]; const double DENDRO_797 = 2*At3[pp]*DENDRO_19; const double DENDRO_798 = 2*At4[pp]*DENDRO_19; const double DENDRO_799 = 12*DENDRO_19*DENDRO_73; const double DENDRO_800 = DENDRO_218*DENDRO_760; const double DENDRO_801 = DENDRO_117*DENDRO_760; const double DENDRO_802 = DENDRO_173*DENDRO_335; const double DENDRO_803 = 1.0*DENDRO_126*DENDRO_31 + 1.0*DENDRO_216*DENDRO_29 - 1.0*DENDRO_218*DENDRO_23; const double DENDRO_804 = 0.25*DENDRO_755; const double DENDRO_805 = (1.0L/3.0L)*At4[pp]; const double DENDRO_806 = DENDRO_160*DENDRO_752; const double DENDRO_807 = DENDRO_636 - DENDRO_802; const double DENDRO_808 = -DENDRO_335*DENDRO_752 + DENDRO_644; const double DENDRO_809 = 0.25*DENDRO_126*DENDRO_31 + 0.25*DENDRO_216*DENDRO_29 - 0.25*DENDRO_218*DENDRO_23; const double DENDRO_810 = DENDRO_224*DENDRO_752; const double DENDRO_811 = DENDRO_216*DENDRO_789; const double DENDRO_812 = DENDRO_183*DENDRO_224; const double DENDRO_813 = DENDRO_110*DENDRO_789; // Dendro: printing variables At_rhs00[pp] = (4.0L/3.0L)*At0[pp]*DENDRO_0 - DENDRO_1*DENDRO_2 - DENDRO_1*DENDRO_3 + DENDRO_34*(-12*DENDRO_35 + 12*DENDRO_58 - DENDRO_60*(-DENDRO_66 + DENDRO_72) - 12*DENDRO_73*(DENDRO_75 + DENDRO_77 - DENDRO_79 - DENDRO_86) + DENDRO_739*gt0[pp] + DENDRO_87*(-DENDRO_115*(DENDRO_187 + DENDRO_188) - DENDRO_115*(DENDRO_173*DENDRO_48 + DENDRO_193) - DENDRO_123*(DENDRO_191 + 0.5*DENDRO_192) - DENDRO_123*(DENDRO_194 - DENDRO_196) - DENDRO_123*(DENDRO_200 + DENDRO_203) - DENDRO_123*(DENDRO_147*DENDRO_54 + DENDRO_180) - DENDRO_123*(DENDRO_154*DENDRO_49 + DENDRO_181) + DENDRO_128*(DENDRO_185 + DENDRO_186) + DENDRO_128*(DENDRO_183*DENDRO_48 + DENDRO_184) + DENDRO_129*(1.0*DENDRO_186 + DENDRO_189) - DENDRO_129*(-DENDRO_183*DENDRO_205 + DENDRO_206*DENDRO_72) - DENDRO_134*(1.0*DENDRO_188 + DENDRO_190) - DENDRO_134*(1.0*DENDRO_126*DENDRO_72 + DENDRO_173*DENDRO_205) + DENDRO_141 + DENDRO_142*DENDRO_148 + DENDRO_149*DENDRO_155 + DENDRO_156*(DENDRO_124 + DENDRO_139) + DENDRO_157*DENDRO_159*DENDRO_31 + DENDRO_168*(DENDRO_160 + DENDRO_163) + DENDRO_173*DENDRO_176 + DENDRO_177*DENDRO_178*DENDRO_72 - DENDRO_207*(DENDRO_209 + DENDRO_210*DENDRO_211 + DENDRO_212*DENDRO_57 + DENDRO_213*DENDRO_214) - DENDRO_244*DENDRO_53 - DENDRO_261*DENDRO_48 - DENDRO_280*DENDRO_44 - DENDRO_304*gt0[pp])) + DENDRO_4*DENDRO_5 + DENDRO_6*DENDRO_7 - alpha[pp]*(-At0[pp]*K[pp] + DENDRO_20*(At0[pp]*DENDRO_23 - At1[pp]*DENDRO_26 + At2[pp]*DENDRO_25) + DENDRO_27*(-At0[pp]*DENDRO_31 + DENDRO_28 + DENDRO_30) + DENDRO_32*(-At0[pp]*DENDRO_29 + At1[pp]*DENDRO_25 - At2[pp]*DENDRO_33)) + beta0[pp]*agrad_0_At0[pp] + beta1[pp]*agrad_1_At0[pp] + beta2[pp]*agrad_2_At0[pp]; At_rhs01[pp] = At0[pp]*DENDRO_740 - At1[pp]*DENDRO_743 + At2[pp]*DENDRO_741 + At3[pp]*DENDRO_5 + At4[pp]*DENDRO_7 + DENDRO_0*DENDRO_742 + DENDRO_2*DENDRO_742 + DENDRO_34*(-12*DENDRO_674 - DENDRO_675*(-6.0*DENDRO_117*DENDRO_25 + 6.0*DENDRO_119*DENDRO_33 + 6.0*DENDRO_29*DENDRO_53 - 6.0*DENDRO_37*DENDRO_64*gt1[pp]) + DENDRO_739*gt1[pp] + DENDRO_748*(DENDRO_37*(DENDRO_463 + DENDRO_749*gt1[pp]) + DENDRO_677) - DENDRO_750*(-DENDRO_678 + DENDRO_679 + DENDRO_680 - DENDRO_681) + DENDRO_87*(DENDRO_101*DENDRO_33*(DENDRO_757 + DENDRO_758 + DENDRO_759) + DENDRO_102*(-DENDRO_347 + DENDRO_540 + DENDRO_634) - DENDRO_108*DENDRO_261 - DENDRO_115*(DENDRO_117*DENDRO_159 + DENDRO_155) + DENDRO_123*(-DENDRO_525*DENDRO_760 + DENDRO_712) - DENDRO_123*(-DENDRO_637 + DENDRO_730 + DENDRO_731) + DENDRO_123*(DENDRO_147*DENDRO_426 - DENDRO_154*DENDRO_627 + DENDRO_667) + 
DENDRO_129*(DENDRO_766 + DENDRO_767) + DENDRO_129*(DENDRO_159*DENDRO_417 + DENDRO_765) + DENDRO_129*(DENDRO_203 + DENDRO_733 + DENDRO_734) + DENDRO_129*(DENDRO_762 + DENDRO_763 + DENDRO_764) + DENDRO_134*(-DENDRO_54*DENDRO_760 + DENDRO_709) + DENDRO_134*(-DENDRO_111*DENDRO_769 - DENDRO_238*DENDRO_72 + DENDRO_728) - DENDRO_134*(DENDRO_119*DENDRO_769 + DENDRO_126*DENDRO_769 + DENDRO_237*DENDRO_72) + DENDRO_134*(-DENDRO_132*DENDRO_154 + DENDRO_140*DENDRO_159 + DENDRO_726) - DENDRO_137*DENDRO_244 + DENDRO_177*(DENDRO_159*DENDRO_54 + DENDRO_159*DENDRO_55 + DENDRO_190) + DENDRO_177*(DENDRO_173*DENDRO_179 + 0.5*DENDRO_193 + DENDRO_417*DENDRO_72) - DENDRO_207*(DENDRO_121*DENDRO_770 + DENDRO_199*DENDRO_771 + DENDRO_269*DENDRO_772 + DENDRO_683) - DENDRO_280*DENDRO_51 - DENDRO_304*gt1[pp] + DENDRO_323*(-DENDRO_701 + DENDRO_702) - DENDRO_323*(-DENDRO_124*DENDRO_154 + DENDRO_717 + DENDRO_761) - DENDRO_323*(-DENDRO_173*DENDRO_324 + DENDRO_721 + DENDRO_768) + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 + DENDRO_699 + DENDRO_700 + DENDRO_704 + DENDRO_706 + DENDRO_710 + DENDRO_714 - DENDRO_751*(DENDRO_117*DENDRO_147 + DENDRO_756) - DENDRO_751*(DENDRO_173*DENDRO_216 + DENDRO_183*DENDRO_218 + DENDRO_753))) - alpha[pp]*(-At1[pp]*K[pp] + DENDRO_20*DENDRO_745 + DENDRO_27*DENDRO_746 + DENDRO_32*DENDRO_747) + beta0[pp]*agrad_0_At1[pp] + beta1[pp]*agrad_1_At1[pp] + beta2[pp]*agrad_2_At1[pp]; At_rhs02[pp] = At0[pp]*DENDRO_773 + At1[pp]*DENDRO_774 - At2[pp]*DENDRO_776 + At4[pp]*DENDRO_5 + At5[pp]*DENDRO_7 + DENDRO_0*DENDRO_775 + DENDRO_3*DENDRO_775 + DENDRO_34*(-12*DENDRO_457 + 6.0*DENDRO_458 + DENDRO_739*gt2[pp] - DENDRO_750*(DENDRO_467 + DENDRO_468 - DENDRO_469 - DENDRO_471) - DENDRO_780*(DENDRO_460 + DENDRO_461 - DENDRO_462 - DENDRO_465) + DENDRO_87*(DENDRO_101*DENDRO_26*(DENDRO_756 + DENDRO_786) + DENDRO_102*(DENDRO_534 - DENDRO_558) - DENDRO_102*(-DENDRO_147*DENDRO_160 - DENDRO_49*DENDRO_791 + DENDRO_790) - DENDRO_106*DENDRO_244 - DENDRO_123*(-DENDRO_553 + DENDRO_554 + DENDRO_580) + DENDRO_123*(DENDRO_365*DENDRO_752 + DENDRO_783 - DENDRO_784) - DENDRO_123*(DENDRO_525*DENDRO_789 + DENDRO_782 + DENDRO_784) + DENDRO_123*(-DENDRO_54*DENDRO_791 + DENDRO_792 - 0.25*DENDRO_793) + DENDRO_128*(DENDRO_110*DENDRO_159 + DENDRO_148) - DENDRO_129*(-DENDRO_147*DENDRO_205 + DENDRO_159*DENDRO_198 - DENDRO_520*DENDRO_791) - DENDRO_129*(DENDRO_183*DENDRO_365 - DENDRO_787 - DENDRO_788) + DENDRO_129*(DENDRO_49*DENDRO_789 + DENDRO_787 + DENDRO_788) - DENDRO_134*(0.25*DENDRO_192 + DENDRO_785) - DENDRO_134*(DENDRO_762 + DENDRO_785) - DENDRO_134*(DENDRO_767 + DENDRO_794) - DENDRO_134*(DENDRO_159*DENDRO_531 + DENDRO_765) - DENDRO_161*DENDRO_261 + DENDRO_177*(DENDRO_179*DENDRO_183 + 1.0*DENDRO_184) + DENDRO_177*(DENDRO_159*DENDRO_49 + DENDRO_159*DENDRO_50 + DENDRO_189) - DENDRO_207*(DENDRO_113*DENDRO_770 + DENDRO_167*DENDRO_771 + DENDRO_262*DENDRO_772 + DENDRO_473) - DENDRO_280*DENDRO_46 - DENDRO_304*gt2[pp] + DENDRO_323*(DENDRO_173*DENDRO_226 + 0.25*DENDRO_753) + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 + DENDRO_562 + DENDRO_569 + DENDRO_574 + DENDRO_576 + DENDRO_577 + DENDRO_582 + DENDRO_583 + DENDRO_584 - DENDRO_751*(DENDRO_758 + DENDRO_781))) - alpha[pp]*(-At2[pp]*K[pp] + DENDRO_20*DENDRO_777 + DENDRO_27*DENDRO_778 + DENDRO_32*DENDRO_779) + beta0[pp]*agrad_0_At2[pp] + 
beta1[pp]*agrad_1_At2[pp] + beta2[pp]*agrad_2_At2[pp]; At_rhs11[pp] = (4.0L/3.0L)*At3[pp]*DENDRO_2 - At3[pp]*DENDRO_743 - At3[pp]*DENDRO_795 + DENDRO_34*(12*DENDRO_36*(DENDRO_37*(DENDRO_387 + DENDRO_388*DENDRO_749) + DENDRO_390) - 12*DENDRO_384 + DENDRO_60*(DENDRO_258 - DENDRO_386*(DENDRO_316 - DENDRO_61)) + DENDRO_739*gt3[pp] + DENDRO_799*(DENDRO_277 - DENDRO_386*(-DENDRO_309 + DENDRO_84)) + DENDRO_87*(DENDRO_101*DENDRO_26*DENDRO_399*DENDRO_760 + DENDRO_115*(DENDRO_408 - DENDRO_801) + DENDRO_115*(-DENDRO_117*DENDRO_154 + DENDRO_411) + DENDRO_115*(-DENDRO_173*DENDRO_218 + DENDRO_420) - DENDRO_117*DENDRO_280 + DENDRO_123*(DENDRO_412 - 1.0*DENDRO_800) - DENDRO_123*(DENDRO_427 - 1.0*DENDRO_429) - DENDRO_123*(DENDRO_430*DENDRO_752 + DENDRO_432) + DENDRO_129*(DENDRO_423 - DENDRO_425) + DENDRO_129*(DENDRO_130*DENDRO_754 + DENDRO_154*DENDRO_417) + DENDRO_129*(DENDRO_417*DENDRO_752 + DENDRO_802) + DENDRO_129*(DENDRO_55*DENDRO_803 + DENDRO_804) + DENDRO_134*(DENDRO_413 - 1.0*DENDRO_801) + DENDRO_134*(DENDRO_434 + DENDRO_761) + DENDRO_134*(DENDRO_436 + DENDRO_768) + DENDRO_154*DENDRO_401 + DENDRO_173*DENDRO_404 - DENDRO_207*(DENDRO_210*DENDRO_258 + DENDRO_212*DENDRO_241 + DENDRO_213*DENDRO_277 + DENDRO_437) - DENDRO_218*DENDRO_261 - DENDRO_233*DENDRO_244 - DENDRO_304*gt3[pp] + DENDRO_328*(DENDRO_406 - DENDRO_800) + DENDRO_328*(-DENDRO_117*DENDRO_754 + DENDRO_419) + DENDRO_328*(-DENDRO_218*DENDRO_752 + DENDRO_410) + DENDRO_398 + DENDRO_400*(DENDRO_229 + DENDRO_335) + DENDRO_403*DENDRO_754 + DENDRO_405*(DENDRO_139 + DENDRO_438))) + DENDRO_4*DENDRO_740 + DENDRO_741*DENDRO_796 - alpha[pp]*(-At3[pp]*K[pp] + DENDRO_20*DENDRO_746 + DENDRO_745*DENDRO_797 + DENDRO_747*DENDRO_798) + beta0[pp]*agrad_0_At3[pp] + beta1[pp]*agrad_1_At3[pp] + beta2[pp]*agrad_2_At3[pp]; At_rhs12[pp] = At1[pp]*DENDRO_773 + At2[pp]*DENDRO_740 + At3[pp]*DENDRO_774 - At4[pp]*DENDRO_795 + At5[pp]*DENDRO_741 + DENDRO_2*DENDRO_805 + DENDRO_3*DENDRO_805 + DENDRO_34*(-12*DENDRO_585 - DENDRO_586*(6.0*DENDRO_126*DENDRO_31 + 6.0*DENDRO_216*DENDRO_29 - 6.0*DENDRO_218*DENDRO_23 - 6.0*DENDRO_310*DENDRO_37*gt4[pp]) + DENDRO_739*gt4[pp] + DENDRO_748*(DENDRO_37*(DENDRO_470 + DENDRO_749*gt4[pp]) + DENDRO_589) - DENDRO_780*(-DENDRO_590 + DENDRO_591 + DENDRO_592 - DENDRO_594) + DENDRO_87*(DENDRO_101*DENDRO_29*(DENDRO_759 + DENDRO_781) + DENDRO_102*(DENDRO_632 - DENDRO_655) - DENDRO_102*(-DENDRO_160*DENDRO_754 + DENDRO_365*DENDRO_754 - DENDRO_417*DENDRO_791) - DENDRO_103*DENDRO_280 + DENDRO_123*(DENDRO_230*DENDRO_760 + DENDRO_671) + DENDRO_123*(-DENDRO_237*DENDRO_789 + DENDRO_808) + DENDRO_123*(DENDRO_336*DENDRO_752 + DENDRO_808) - DENDRO_123*(DENDRO_119*DENDRO_809 + DENDRO_136*DENDRO_791 + DENDRO_672) + DENDRO_123*(-DENDRO_126*DENDRO_809 - DENDRO_627*DENDRO_754 + DENDRO_649) - DENDRO_129*(DENDRO_183*DENDRO_336 + DENDRO_783 - DENDRO_806) - DENDRO_129*(-DENDRO_205*DENDRO_754 - DENDRO_55*DENDRO_791 + DENDRO_792) + DENDRO_129*(DENDRO_417*DENDRO_789 + DENDRO_782 + DENDRO_806) + DENDRO_134*(-DENDRO_183*DENDRO_324 + DENDRO_807) + DENDRO_134*(-DENDRO_531*DENDRO_760 + DENDRO_665) + DENDRO_134*(-DENDRO_627*DENDRO_752 + DENDRO_807) + DENDRO_134*(-DENDRO_124*DENDRO_147 + DENDRO_667 - 0.25*DENDRO_786) + DENDRO_134*(-DENDRO_132*DENDRO_754 + DENDRO_647 - DENDRO_804) + DENDRO_134*(-DENDRO_417*DENDRO_760 + DENDRO_518 + DENDRO_616) + DENDRO_177*(DENDRO_173*DENDRO_197 + DENDRO_764) + DENDRO_177*(DENDRO_180 + DENDRO_766 + DENDRO_794) - DENDRO_207*(DENDRO_221*DENDRO_770 + DENDRO_249*DENDRO_771 + DENDRO_267*DENDRO_772 + DENDRO_596) - DENDRO_227*DENDRO_261 - 
DENDRO_235*DENDRO_244 - DENDRO_304*gt4[pp] - DENDRO_323*(-DENDRO_324*DENDRO_752 + DENDRO_653) - DENDRO_323*(-DENDRO_124*DENDRO_754 + DENDRO_642 + DENDRO_673) - DENDRO_323*(-DENDRO_237*DENDRO_760 - DENDRO_238*DENDRO_760 + DENDRO_412) + DENDRO_328*(-DENDRO_216*DENDRO_760 + DENDRO_394) + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 + DENDRO_657 + DENDRO_662 + DENDRO_663 + DENDRO_669)) - alpha[pp]*(-At4[pp]*K[pp] + DENDRO_20*DENDRO_778 + DENDRO_777*DENDRO_797 + DENDRO_779*DENDRO_798) + beta0[pp]*agrad_0_At4[pp] + beta1[pp]*agrad_1_At4[pp] + beta2[pp]*agrad_2_At4[pp]; At_rhs22[pp] = (4.0L/3.0L)*At5[pp]*DENDRO_3 - At5[pp]*DENDRO_776 - At5[pp]*DENDRO_795 + DENDRO_34*(-12*DENDRO_305 + 12*DENDRO_306 - 12*DENDRO_59*(DENDRO_312 - DENDRO_313 + DENDRO_314 - DENDRO_318) + DENDRO_739*gt5[pp] - DENDRO_799*(-DENDRO_311 + DENDRO_791) + DENDRO_87*(DENDRO_110*DENDRO_183*DENDRO_340 - DENDRO_110*DENDRO_280 - DENDRO_123*(DENDRO_366 - 1.0*DENDRO_368) - DENDRO_123*(0.25*DENDRO_810 + 1.0*DENDRO_811) + DENDRO_128*(DENDRO_812 + DENDRO_813) + DENDRO_128*(DENDRO_110*DENDRO_147 + DENDRO_48*DENDRO_791) + DENDRO_129*(DENDRO_362 - 1.0*DENDRO_364) + DENDRO_129*(0.25*DENDRO_812 + 1.0*DENDRO_813) - DENDRO_129*(-DENDRO_370*DENDRO_791 + DENDRO_790) - DENDRO_134*(DENDRO_179*DENDRO_754 + 0.5*DENDRO_793) - DENDRO_134*(DENDRO_183*DENDRO_226 + DENDRO_806) - DENDRO_134*(DENDRO_197*DENDRO_752 + DENDRO_784) - DENDRO_134*(DENDRO_50*DENDRO_803 + 0.25*DENDRO_758) + DENDRO_147*DENDRO_343 + DENDRO_149*DENDRO_216*DENDRO_752 - DENDRO_207*(DENDRO_210*DENDRO_255 + DENDRO_212*DENDRO_231 + DENDRO_213*DENDRO_274 + DENDRO_371) - DENDRO_216*DENDRO_244 - DENDRO_224*DENDRO_261 - DENDRO_304*gt5[pp] - DENDRO_328*(DENDRO_810 + DENDRO_811) - DENDRO_328*(DENDRO_110*DENDRO_754 + DENDRO_126*DENDRO_791) + DENDRO_33*DENDRO_342*DENDRO_789 + DENDRO_338 + DENDRO_344*DENDRO_754 + DENDRO_345*(DENDRO_163 + DENDRO_375) + DENDRO_346*(DENDRO_229 + DENDRO_373))) + DENDRO_6*DENDRO_773 + DENDRO_774*DENDRO_796 - alpha[pp]*(At5[pp]*DENDRO_297*DENDRO_779 - At5[pp]*K[pp] + DENDRO_32*DENDRO_778 + DENDRO_777*DENDRO_798) + beta0[pp]*agrad_0_At5[pp] + beta1[pp]*agrad_1_At5[pp] + beta2[pp]*agrad_2_At5[pp]; // Dendro: reduced ops: 3569 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(At_rhs12, &__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(At_rhs11, &__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(At_rhs22, &__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(At_rhs02, &__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(At_rhs00, &__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); 
cuda::__storeSharedToGlobal3D<double>(At_rhs01, &__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_At_rhs /**@brief compute K_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_K_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for K_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={4,4,4}; //input vars begin double * K = __sm_base + 0; double * gt1 = __sm_base + 64; double * beta1 = __sm_base + 128; double * gt3 = __sm_base + 192; double * At1 = __sm_base + 256; double * gt5 = __sm_base + 320; double * alpha = __sm_base + 384; double * gt4 = __sm_base + 448; double * gt2 = __sm_base + 512; double * At3 = __sm_base + 576; double * beta2 = __sm_base + 640; double * At4 = __sm_base + 704; double * At0 = __sm_base + 768; double * At2 = __sm_base + 832; double * beta0 = __sm_base + 896; double * gt0 = __sm_base + 960; double * chi = __sm_base + 1024; double * At5 = __sm_base + 1088; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad_0_gt5 = __sm_base + 1152; double * grad_1_gt0 = __sm_base + 1216; double * grad2_0_1_alpha = __sm_base + 1280; double * grad2_2_2_alpha = __sm_base + 1344; double * grad_2_gt0 = __sm_base + 1408; double * grad_0_gt4 = __sm_base + 1472; double * grad_2_gt3 = __sm_base + 
1536; double * grad_1_alpha = __sm_base + 1600; double * grad_2_alpha = __sm_base + 1664; double * grad2_1_1_alpha = __sm_base + 1728; double * grad_1_gt5 = __sm_base + 1792; double * grad_0_gt1 = __sm_base + 1856; double * grad_1_gt4 = __sm_base + 1920; double * agrad_2_K = __sm_base + 1984; double * grad_1_gt1 = __sm_base + 2048; double * grad_2_gt4 = __sm_base + 2112; double * grad_0_alpha = __sm_base + 2176; double * grad_0_chi = __sm_base + 2240; double * grad2_0_0_alpha = __sm_base + 2304; double * agrad_1_K = __sm_base + 2368; double * grad_2_gt2 = __sm_base + 2432; double * grad_1_chi = __sm_base + 2496; double * grad_0_gt0 = __sm_base + 2560; double * grad_0_gt3 = __sm_base + 2624; double * grad2_1_2_alpha = __sm_base + 2688; double * grad_2_gt5 = __sm_base + 2752; double * agrad_0_K = __sm_base + 2816; double * grad_1_gt3 = __sm_base + 2880; double * grad_2_chi = __sm_base + 2944; double * grad_2_gt1 = __sm_base + 3008; double * grad_0_gt2 = __sm_base + 3072; double * grad2_0_2_alpha = __sm_base + 3136; double * grad_1_gt2 = __sm_base + 3200; // deriv vars end // output vars begin double * K_rhs = __sm_base + 3264; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
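// staging note: the loads below copy the remaining primary BSSN fields (metric, lapse, shift, At, chi) and the precomputed derivative buffers for this tile into shared memory; the pointwise K_rhs expression further down reads only these shared arrays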
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_alpha,(const unsigned int *) ijk_lm,(const unsigned 
int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 3960 // Dendro: printing temp variables const double DENDRO_0 = pow(gt4[pp], 2); const double DENDRO_1 = DENDRO_0*gt0[pp]; const double DENDRO_2 = pow(gt1[pp], 2); const double DENDRO_3 = DENDRO_2*gt5[pp]; const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = DENDRO_4*gt3[pp]; const double DENDRO_6 = gt0[pp]*gt3[pp]; const double DENDRO_7 = DENDRO_6*gt5[pp]; const double DENDRO_8 = gt1[pp]*gt2[pp]; const double DENDRO_9 = 2*DENDRO_8*gt4[pp]; const double DENDRO_10 = DENDRO_1 + DENDRO_3 + DENDRO_5 - DENDRO_7 - DENDRO_9; const double DENDRO_11 = 1.0/DENDRO_10; const double DENDRO_12 = DENDRO_11*chi[pp]; const double DENDRO_13 = -DENDRO_2 + DENDRO_6; const double DENDRO_14 = grad_1_alpha[pp]; const double DENDRO_15 = 1.0/(-DENDRO_1 - DENDRO_3 - DENDRO_5 + DENDRO_7 + DENDRO_9); const double DENDRO_16 = DENDRO_14*DENDRO_15; const double DENDRO_17 = grad_2_gt5[pp]; const double DENDRO_18 = -0.5*gt0[pp]*gt4[pp] + 0.5*gt1[pp]*gt2[pp]; const double DENDRO_19 = gt2[pp]*gt4[pp]; const double DENDRO_20 = gt1[pp]*gt5[pp]; const double DENDRO_21 = DENDRO_19 - DENDRO_20; const double DENDRO_22 = grad_0_gt5[pp]; const double DENDRO_23 = -0.5*DENDRO_22 + 1.0*grad_2_gt2[pp]; const double DENDRO_24 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_25 = grad_1_gt5[pp]; const double DENDRO_26 = -0.5*DENDRO_25 + 1.0*grad_2_gt4[pp]; const double DENDRO_27 = 0.5*gt5[pp]; const double DENDRO_28 = 1.0/chi[pp]; const double DENDRO_29 = grad_2_chi[pp]; const double DENDRO_30 = gt0[pp]*gt4[pp]; const double DENDRO_31 = -DENDRO_30 + DENDRO_8; const double DENDRO_32 = grad_0_chi[pp]; const double DENDRO_33 = grad_1_chi[pp]; const double DENDRO_34 = DENDRO_21*DENDRO_32 + DENDRO_24*DENDRO_33 + DENDRO_29*DENDRO_31; const double DENDRO_35 = DENDRO_28*DENDRO_34; const double DENDRO_36 = grad_0_alpha[pp]; const double DENDRO_37 = DENDRO_15*DENDRO_36; const double DENDRO_38 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_39 = -DENDRO_0 + gt3[pp]*gt5[pp]; const double 
DENDRO_40 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_41 = DENDRO_21*DENDRO_33 + DENDRO_29*DENDRO_40 + DENDRO_32*DENDRO_39; const double DENDRO_42 = DENDRO_28*DENDRO_41; const double DENDRO_43 = grad_2_alpha[pp]; const double DENDRO_44 = DENDRO_13*DENDRO_15; const double DENDRO_45 = DENDRO_15*DENDRO_31; const double DENDRO_46 = DENDRO_15*DENDRO_40; const double DENDRO_47 = DENDRO_13*DENDRO_29 + DENDRO_31*DENDRO_33 + DENDRO_32*DENDRO_40; const double DENDRO_48 = DENDRO_15*DENDRO_43; const double DENDRO_49 = grad_1_gt3[pp]; const double DENDRO_50 = grad_0_gt3[pp]; const double DENDRO_51 = -0.5*DENDRO_50 + 1.0*grad_1_gt1[pp]; const double DENDRO_52 = grad_2_gt3[pp]; const double DENDRO_53 = -0.5*DENDRO_52 + 1.0*grad_1_gt4[pp]; const double DENDRO_54 = 0.5*gt3[pp]; const double DENDRO_55 = DENDRO_28*DENDRO_47; const double DENDRO_56 = -0.5*gt1[pp]*gt5[pp] + 0.5*gt2[pp]*gt4[pp]; const double DENDRO_57 = DENDRO_15*DENDRO_24; const double DENDRO_58 = DENDRO_15*DENDRO_21; const double DENDRO_59 = grad_0_gt0[pp]; const double DENDRO_60 = grad_1_gt0[pp]; const double DENDRO_61 = -0.5*DENDRO_60 + 1.0*grad_0_gt1[pp]; const double DENDRO_62 = grad_2_gt0[pp]; const double DENDRO_63 = -0.5*DENDRO_62 + 1.0*grad_0_gt2[pp]; const double DENDRO_64 = 0.5*gt0[pp]; const double DENDRO_65 = DENDRO_15*DENDRO_39; const double DENDRO_66 = 2*DENDRO_11*chi[pp]; const double DENDRO_67 = DENDRO_30 - DENDRO_8; const double DENDRO_68 = 0.5*DENDRO_15; const double DENDRO_69 = grad_1_gt2[pp]; const double DENDRO_70 = grad_2_gt1[pp]; const double DENDRO_71 = grad_0_gt4[pp]; const double DENDRO_72 = DENDRO_69 + DENDRO_70 - DENDRO_71; const double DENDRO_73 = 0.5*DENDRO_43; const double DENDRO_74 = DENDRO_15*gt4[pp]; const double DENDRO_75 = 0.5*DENDRO_14; const double DENDRO_76 = -DENDRO_69 + DENDRO_70 + DENDRO_71; const double DENDRO_77 = DENDRO_15*gt2[pp]; const double DENDRO_78 = 0.5*DENDRO_36; const double DENDRO_79 = -DENDRO_19 + DENDRO_20; const double DENDRO_80 = DENDRO_69 - DENDRO_70 + DENDRO_71; const double DENDRO_81 = DENDRO_15*gt1[pp]; const double DENDRO_82 = pow(DENDRO_10, -2); const double DENDRO_83 = 3*DENDRO_82; const double DENDRO_84 = pow(DENDRO_79, 2); const double DENDRO_85 = pow(DENDRO_40, 2); const double DENDRO_86 = DENDRO_40*DENDRO_79; const double DENDRO_87 = 2*At1[pp]*DENDRO_79; const double DENDRO_88 = 2*At2[pp]*DENDRO_40; const double DENDRO_89 = pow(DENDRO_67, 2); const double DENDRO_90 = DENDRO_67*DENDRO_79; const double DENDRO_91 = 2*At4[pp]*DENDRO_67; const double DENDRO_92 = DENDRO_40*DENDRO_67; const double DENDRO_93 = 6*DENDRO_82; const double DENDRO_94 = At0[pp]*DENDRO_39; const double DENDRO_95 = At5[pp]*DENDRO_13; const double DENDRO_96 = DENDRO_39*DENDRO_67; const double DENDRO_97 = DENDRO_13*DENDRO_79; const double DENDRO_98 = DENDRO_24*DENDRO_40; const double DENDRO_99 = At3[pp]*DENDRO_24; // Dendro: printing variables K_rhs[pp] = -DENDRO_12*DENDRO_13*(DENDRO_16*(DENDRO_17*DENDRO_18 + DENDRO_21*DENDRO_23 + DENDRO_24*DENDRO_26 + DENDRO_27*DENDRO_35) + DENDRO_37*(DENDRO_17*DENDRO_38 + DENDRO_21*DENDRO_26 + DENDRO_23*DENDRO_39 + DENDRO_27*DENDRO_42) + DENDRO_43*(0.5*DENDRO_17*DENDRO_44 + DENDRO_23*DENDRO_46 + DENDRO_26*DENDRO_45 - DENDRO_28*(-DENDRO_15*DENDRO_27*DENDRO_47 + 1.0*DENDRO_29)) - grad2_2_2_alpha[pp]) - DENDRO_12*DENDRO_24*(DENDRO_14*(-DENDRO_28*(-DENDRO_15*DENDRO_34*DENDRO_54 + 1.0*DENDRO_33) + DENDRO_45*DENDRO_53 + 0.5*DENDRO_49*DENDRO_57 + DENDRO_51*DENDRO_58) + DENDRO_37*(DENDRO_39*DENDRO_51 + DENDRO_40*DENDRO_53 + DENDRO_42*DENDRO_54 + DENDRO_49*DENDRO_56) + 
DENDRO_48*(DENDRO_13*DENDRO_53 + DENDRO_18*DENDRO_49 + DENDRO_40*DENDRO_51 + DENDRO_54*DENDRO_55) - grad2_1_1_alpha[pp]) - DENDRO_12*DENDRO_39*(DENDRO_16*(DENDRO_24*DENDRO_61 + DENDRO_31*DENDRO_63 + DENDRO_35*DENDRO_64 + DENDRO_56*DENDRO_59) + DENDRO_36*(-DENDRO_28*(-DENDRO_15*DENDRO_41*DENDRO_64 + 1.0*DENDRO_32) + DENDRO_46*DENDRO_63 + DENDRO_58*DENDRO_61 + 0.5*DENDRO_59*DENDRO_65) + DENDRO_48*(DENDRO_13*DENDRO_63 + DENDRO_31*DENDRO_61 + DENDRO_38*DENDRO_59 + DENDRO_55*DENDRO_64) - grad2_0_0_alpha[pp]) - DENDRO_40*DENDRO_66*(DENDRO_14*DENDRO_68*(DENDRO_21*DENDRO_62 + DENDRO_22*DENDRO_31 + DENDRO_24*DENDRO_76 + DENDRO_35*gt2[pp]) + DENDRO_73*(DENDRO_22*DENDRO_44 - DENDRO_28*(DENDRO_32 - DENDRO_47*DENDRO_77) + DENDRO_45*DENDRO_76 + DENDRO_46*DENDRO_62) + DENDRO_78*(DENDRO_22*DENDRO_46 - DENDRO_28*(DENDRO_29 - DENDRO_41*DENDRO_77) + DENDRO_58*DENDRO_76 + DENDRO_62*DENDRO_65) - grad2_0_2_alpha[pp]) + DENDRO_66*DENDRO_67*(DENDRO_36*DENDRO_68*(DENDRO_21*DENDRO_52 + DENDRO_25*DENDRO_40 + DENDRO_39*DENDRO_72 + DENDRO_42*gt4[pp]) + DENDRO_73*(DENDRO_25*DENDRO_44 - DENDRO_28*(DENDRO_33 - DENDRO_47*DENDRO_74) + DENDRO_45*DENDRO_52 + DENDRO_46*DENDRO_72) + DENDRO_75*(DENDRO_25*DENDRO_45 - DENDRO_28*(DENDRO_29 - DENDRO_34*DENDRO_74) + DENDRO_52*DENDRO_57 + DENDRO_58*DENDRO_72) - grad2_1_2_alpha[pp]) + DENDRO_66*DENDRO_79*(DENDRO_43*DENDRO_68*(DENDRO_13*DENDRO_80 + DENDRO_31*DENDRO_50 + DENDRO_40*DENDRO_60 + DENDRO_55*gt1[pp]) + DENDRO_75*(-DENDRO_28*(DENDRO_32 - DENDRO_34*DENDRO_81) + DENDRO_45*DENDRO_80 + DENDRO_50*DENDRO_57 + DENDRO_58*DENDRO_60) + DENDRO_78*(-DENDRO_28*(DENDRO_33 - DENDRO_41*DENDRO_81) + DENDRO_46*DENDRO_80 + DENDRO_50*DENDRO_58 + DENDRO_60*DENDRO_65) - grad2_0_1_alpha[pp]) + (1.0L/3.0L)*alpha[pp]*(At0[pp]*DENDRO_83*(At0[pp]*pow(DENDRO_39, 2) + At3[pp]*DENDRO_84 - 2*At4[pp]*DENDRO_86 + At5[pp]*DENDRO_85 - DENDRO_39*DENDRO_87 + DENDRO_39*DENDRO_88) + At1[pp]*DENDRO_93*(At1[pp]*DENDRO_24*DENDRO_39 + At1[pp]*DENDRO_84 - At2[pp]*DENDRO_86 - At2[pp]*DENDRO_96 + At4[pp]*DENDRO_90 + At4[pp]*DENDRO_98 - At5[pp]*DENDRO_92 - DENDRO_79*DENDRO_94 - DENDRO_79*DENDRO_99) + At2[pp]*DENDRO_93*(-At1[pp]*DENDRO_86 - At1[pp]*DENDRO_96 + At2[pp]*DENDRO_13*DENDRO_39 + At2[pp]*DENDRO_85 + At3[pp]*DENDRO_90 - At4[pp]*DENDRO_92 - At4[pp]*DENDRO_97 + DENDRO_40*DENDRO_94 + DENDRO_40*DENDRO_95) + At3[pp]*DENDRO_83*(At0[pp]*DENDRO_84 + 2*At2[pp]*DENDRO_90 + At3[pp]*pow(DENDRO_24, 2) + At5[pp]*DENDRO_89 - DENDRO_24*DENDRO_87 - DENDRO_24*DENDRO_91) + At4[pp]*DENDRO_93*(-At0[pp]*DENDRO_86 + At1[pp]*DENDRO_90 + At1[pp]*DENDRO_98 - At2[pp]*DENDRO_92 - At2[pp]*DENDRO_97 + At4[pp]*DENDRO_13*DENDRO_24 + At4[pp]*DENDRO_89 - DENDRO_67*DENDRO_95 - DENDRO_67*DENDRO_99) + At5[pp]*DENDRO_83*(At0[pp]*DENDRO_85 - 2*At1[pp]*DENDRO_92 + At3[pp]*DENDRO_89 + At5[pp]*pow(DENDRO_13, 2) + DENDRO_13*DENDRO_88 - DENDRO_13*DENDRO_91) + pow(K[pp], 2)) + beta0[pp]*agrad_0_K[pp] + beta1[pp]*agrad_1_K[pp] + beta2[pp]*agrad_2_K[pp]; // Dendro: reduced ops: 501 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(K_rhs, &__unzipOutVar[cuda::VAR::U_K][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_K_rhs /**@brief compute Gt_rhs @param[in] __unzipInVar: unzipped input array 
(global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_Gt_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for Gt_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 27; double * gt3 = __sm_base + 54; double * At1 = __sm_base + 81; double * gt5 = __sm_base + 108; double * alpha = __sm_base + 135; double * gt4 = __sm_base + 162; double * gt2 = __sm_base + 189; double * At3 = __sm_base + 216; double * beta2 = __sm_base + 243; double * At4 = __sm_base + 270; double * At0 = __sm_base + 297; double * At2 = __sm_base + 324; double * beta0 = __sm_base + 351; double * gt0 = __sm_base + 378; double * chi = __sm_base + 405; double * At5 = __sm_base + 432; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad_0_gt5 = __sm_base + 459; double * grad_1_gt0 = __sm_base + 486; double * grad2_1_2_beta2 = __sm_base + 513; double * grad_2_K = __sm_base + 540; double * grad_0_beta0 = __sm_base + 567; double * grad_2_gt0 = __sm_base + 594; double * grad_0_gt4 = __sm_base + 621; double * grad2_1_1_beta1 = __sm_base + 648; double * grad_2_gt3 = __sm_base + 675; double * grad2_0_1_beta0 = __sm_base + 702; double * grad2_0_0_beta1 = __sm_base + 729; double * grad_1_K = __sm_base + 756; double * grad_1_alpha = __sm_base + 783; double * grad2_0_2_beta1 = __sm_base + 810; double * grad2_0_1_beta1 = __sm_base + 837; double * grad2_2_2_beta2 = __sm_base + 864; double * grad_1_beta1 = __sm_base + 891; double * grad_2_alpha = __sm_base + 918; double * grad_2_beta0 = __sm_base + 945; double * grad_0_gt1 = 
__sm_base + 972; double * grad_1_gt5 = __sm_base + 999; double * agrad_2_Gt1 = __sm_base + 1026; double * agrad_0_Gt2 = __sm_base + 1053; double * grad2_1_1_beta2 = __sm_base + 1080; double * grad_1_gt4 = __sm_base + 1107; double * grad2_2_2_beta1 = __sm_base + 1134; double * grad_1_gt1 = __sm_base + 1161; double * grad_2_gt4 = __sm_base + 1188; double * grad_0_beta1 = __sm_base + 1215; double * grad_0_alpha = __sm_base + 1242; double * grad_0_chi = __sm_base + 1269; double * grad_2_beta2 = __sm_base + 1296; double * grad2_1_2_beta0 = __sm_base + 1323; double * grad2_1_1_beta0 = __sm_base + 1350; double * agrad_0_Gt1 = __sm_base + 1377; double * grad_0_K = __sm_base + 1404; double * grad2_0_2_beta2 = __sm_base + 1431; double * agrad_0_Gt0 = __sm_base + 1458; double * agrad_1_Gt1 = __sm_base + 1485; double * grad2_0_0_beta0 = __sm_base + 1512; double * agrad_1_Gt2 = __sm_base + 1539; double * agrad_1_Gt0 = __sm_base + 1566; double * grad_2_gt2 = __sm_base + 1593; double * grad_1_chi = __sm_base + 1620; double * grad_0_gt0 = __sm_base + 1647; double * grad_0_gt3 = __sm_base + 1674; double * grad2_2_2_beta0 = __sm_base + 1701; double * agrad_2_Gt2 = __sm_base + 1728; double * grad_2_beta1 = __sm_base + 1755; double * grad_2_gt5 = __sm_base + 1782; double * grad_1_beta0 = __sm_base + 1809; double * grad2_0_1_beta2 = __sm_base + 1836; double * grad_1_gt3 = __sm_base + 1863; double * grad2_0_2_beta0 = __sm_base + 1890; double * grad_2_chi = __sm_base + 1917; double * grad_2_gt1 = __sm_base + 1944; double * agrad_2_Gt0 = __sm_base + 1971; double * grad_0_gt2 = __sm_base + 1998; double * grad_0_beta2 = __sm_base + 2025; double * grad_1_beta2 = __sm_base + 2052; double * grad2_0_0_beta2 = __sm_base + 2079; double * grad_1_gt2 = __sm_base + 2106; double * grad2_1_2_beta1 = __sm_base + 2133; // deriv vars end // output vars begin double * Gt_rhs0 = __sm_base + 2160; double * Gt_rhs2 = __sm_base + 2187; double * Gt_rhs1 = __sm_base + 2214; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
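// in addition to the primary fields, Gt_rhs stages first and second derivatives of the shift (grad_*_beta*, grad2_*_*_beta*), the gradients of K, and the advective derivatives agrad_*_Gt* from the derivative workspace below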
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
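// derivative workspace buffers are indexed as stream_id*__szPerStream + SM_ID*__maxBlkSz, so blocks executing on different SMs (or in different streams) read disjoint scratch regions of the precomputed derivatives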
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt1,(const 
unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
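// the agrad_*_Gt* buffers hold the advective derivatives of the conformal connection functions; in the expressions below they enter only through the beta^i * agrad_i_Gt terms at the end of each Gt_rhs component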
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta0,(const unsigned int *) ijk_lm,(const 
unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 16710 // Dendro: printing temp variables const double DENDRO_0 = gt1[pp]*gt2[pp]; const double DENDRO_1 = -DENDRO_0 + gt0[pp]*gt4[pp]; const double DENDRO_2 = pow(gt4[pp], 2); const double DENDRO_3 = pow(gt1[pp], 2); const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = gt0[pp]*gt3[pp]; const double DENDRO_6 = -2*DENDRO_0*gt4[pp] + DENDRO_2*gt0[pp] + DENDRO_3*gt5[pp] + DENDRO_4*gt3[pp] - DENDRO_5*gt5[pp]; const double DENDRO_7 = 1.0/DENDRO_6; const double DENDRO_8 = grad2_0_2_beta0[pp]; const double DENDRO_9 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_10 = (7.0L/3.0L)*DENDRO_7*DENDRO_9; const double DENDRO_11 = grad2_1_2_beta1[pp]; const double DENDRO_12 = (1.0L/3.0L)*DENDRO_7*DENDRO_9; const double DENDRO_13 = grad2_2_2_beta2[pp]; const double DENDRO_14 = grad2_0_1_beta0[pp]; const double DENDRO_15 = gt1[pp]*gt5[pp] - gt2[pp]*gt4[pp]; const double DENDRO_16 = (7.0L/3.0L)*DENDRO_15*DENDRO_7; const double DENDRO_17 = grad2_1_1_beta1[pp]; const double DENDRO_18 = (1.0L/3.0L)*DENDRO_15*DENDRO_7; const double DENDRO_19 = grad2_1_2_beta2[pp]; const double DENDRO_20 = -DENDRO_3 + DENDRO_5; const double DENDRO_21 = DENDRO_20*DENDRO_7; const double DENDRO_22 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_23 = DENDRO_22*DENDRO_7; const double DENDRO_24 = grad2_0_0_beta0[pp]; const double DENDRO_25 = -DENDRO_2 + gt3[pp]*gt5[pp]; const double DENDRO_26 = DENDRO_25*DENDRO_7; const double DENDRO_27 = grad2_0_1_beta1[pp]; const double DENDRO_28 = (1.0L/3.0L)*DENDRO_25*DENDRO_7; const double DENDRO_29 = grad2_0_2_beta2[pp]; const double DENDRO_30 = pow(DENDRO_6, -2); const double DENDRO_31 = 2*DENDRO_30*grad_0_alpha[pp]; const double DENDRO_32 = pow(DENDRO_15, 2); const double DENDRO_33 = pow(DENDRO_9, 2); const double DENDRO_34 = DENDRO_15*DENDRO_9; const double DENDRO_35 = 2*At1[pp]*DENDRO_15; const double DENDRO_36 = 2*At2[pp]*DENDRO_9; const double DENDRO_37 = At0[pp]*pow(DENDRO_25, 2) + At3[pp]*DENDRO_32 - 2*At4[pp]*DENDRO_34 + 
At5[pp]*DENDRO_33 - DENDRO_25*DENDRO_35 + DENDRO_25*DENDRO_36; const double DENDRO_38 = (1.0L/3.0L)*DENDRO_7*alpha[pp]; const double DENDRO_39 = grad_0_K[pp]; const double DENDRO_40 = 1.0/chi[pp]; const double DENDRO_41 = 9*DENDRO_40*DENDRO_7*grad_0_chi[pp]; const double DENDRO_42 = grad_0_gt0[pp]; const double DENDRO_43 = grad_1_gt0[pp]; const double DENDRO_44 = -0.5*DENDRO_43 + 1.0*grad_0_gt1[pp]; const double DENDRO_45 = grad_2_gt0[pp]; const double DENDRO_46 = -0.5*DENDRO_45 + 1.0*grad_0_gt2[pp]; const double DENDRO_47 = DENDRO_15*DENDRO_44 - 0.5*DENDRO_25*DENDRO_42 - DENDRO_46*DENDRO_9; const double DENDRO_48 = pow(DENDRO_6, -3); const double DENDRO_49 = 2*DENDRO_37*DENDRO_48*alpha[pp]; const double DENDRO_50 = grad_1_gt3[pp]; const double DENDRO_51 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_52 = grad_2_gt3[pp]; const double DENDRO_53 = -0.5*DENDRO_52 + 1.0*grad_1_gt4[pp]; const double DENDRO_54 = grad_0_gt3[pp]; const double DENDRO_55 = 0.5*DENDRO_54 - 1.0*grad_1_gt1[pp]; const double DENDRO_56 = DENDRO_25*DENDRO_55 + DENDRO_50*DENDRO_51 - DENDRO_53*DENDRO_9; const double DENDRO_57 = pow(DENDRO_1, 2); const double DENDRO_58 = DENDRO_1*DENDRO_15; const double DENDRO_59 = 2*At4[pp]*DENDRO_1; const double DENDRO_60 = At0[pp]*DENDRO_32 + 2*At2[pp]*DENDRO_58 + At3[pp]*pow(DENDRO_22, 2) + At5[pp]*DENDRO_57 - DENDRO_22*DENDRO_35 - DENDRO_22*DENDRO_59; const double DENDRO_61 = 2*DENDRO_48*DENDRO_60*alpha[pp]; const double DENDRO_62 = grad_2_gt5[pp]; const double DENDRO_63 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_64 = grad_1_gt5[pp]; const double DENDRO_65 = 0.5*DENDRO_64 - 1.0*grad_2_gt4[pp]; const double DENDRO_66 = grad_0_gt5[pp]; const double DENDRO_67 = 0.5*DENDRO_66 - 1.0*grad_2_gt2[pp]; const double DENDRO_68 = -DENDRO_15*DENDRO_65 + DENDRO_25*DENDRO_67 - DENDRO_62*DENDRO_63; const double DENDRO_69 = DENDRO_1*DENDRO_9; const double DENDRO_70 = At0[pp]*DENDRO_33 - 2*At1[pp]*DENDRO_69 + At3[pp]*DENDRO_57 + At5[pp]*pow(DENDRO_20, 2) + DENDRO_20*DENDRO_36 - DENDRO_20*DENDRO_59; const double DENDRO_71 = 2*DENDRO_48*DENDRO_70*alpha[pp]; const double DENDRO_72 = 2*DENDRO_30*grad_2_alpha[pp]; const double DENDRO_73 = At0[pp]*DENDRO_25; const double DENDRO_74 = At5[pp]*DENDRO_20; const double DENDRO_75 = DENDRO_1*DENDRO_25; const double DENDRO_76 = DENDRO_15*DENDRO_20; const double DENDRO_77 = -At1[pp]*DENDRO_34 - At1[pp]*DENDRO_75 + At2[pp]*DENDRO_20*DENDRO_25 + At2[pp]*DENDRO_33 + At3[pp]*DENDRO_58 - At4[pp]*DENDRO_69 - At4[pp]*DENDRO_76 + DENDRO_73*DENDRO_9 + DENDRO_74*DENDRO_9; const double DENDRO_78 = 2*DENDRO_30*grad_1_alpha[pp]; const double DENDRO_79 = At1[pp]*DENDRO_32; const double DENDRO_80 = At4[pp]*DENDRO_58; const double DENDRO_81 = At2[pp]*DENDRO_34; const double DENDRO_82 = DENDRO_22*DENDRO_9; const double DENDRO_83 = At4[pp]*DENDRO_82; const double DENDRO_84 = At5[pp]*DENDRO_69; const double DENDRO_85 = DENDRO_15*DENDRO_73; const double DENDRO_86 = At1[pp]*DENDRO_22*DENDRO_25; const double DENDRO_87 = At2[pp]*DENDRO_75; const double DENDRO_88 = At3[pp]*DENDRO_22; const double DENDRO_89 = DENDRO_15*DENDRO_88; const double DENDRO_90 = DENDRO_79 + DENDRO_80 - DENDRO_81 + DENDRO_83 - DENDRO_84 - DENDRO_85 + DENDRO_86 - DENDRO_87 - DENDRO_89; const double DENDRO_91 = grad_0_gt4[pp]; const double DENDRO_92 = grad_2_gt1[pp]; const double DENDRO_93 = grad_1_gt2[pp]; const double DENDRO_94 = DENDRO_91 + DENDRO_92 - DENDRO_93; const double DENDRO_95 = DENDRO_15*DENDRO_94 - DENDRO_25*DENDRO_45 - DENDRO_66*DENDRO_9; const double DENDRO_96 = 
2.0*DENDRO_48*DENDRO_77*alpha[pp]; const double DENDRO_97 = grad_2_K[pp]; const double DENDRO_98 = 4*gt1[pp]*gt4[pp] - 4*gt2[pp]*gt3[pp]; const double DENDRO_99 = 9*DENDRO_40*DENDRO_7*grad_2_chi[pp]; const double DENDRO_100 = DENDRO_91 - DENDRO_92 + DENDRO_93; const double DENDRO_101 = -DENDRO_100*DENDRO_9 + DENDRO_15*DENDRO_54 - DENDRO_25*DENDRO_43; const double DENDRO_102 = 2.0*DENDRO_48*DENDRO_90*alpha[pp]; const double DENDRO_103 = -DENDRO_91 + DENDRO_92 + DENDRO_93; const double DENDRO_104 = -DENDRO_103*DENDRO_25 + DENDRO_15*DENDRO_52 - DENDRO_64*DENDRO_9; const double DENDRO_105 = At4[pp]*DENDRO_57; const double DENDRO_106 = At1[pp]*DENDRO_58; const double DENDRO_107 = At0[pp]*DENDRO_34; const double DENDRO_108 = At1[pp]*DENDRO_82; const double DENDRO_109 = At2[pp]*DENDRO_69; const double DENDRO_110 = At2[pp]*DENDRO_76; const double DENDRO_111 = DENDRO_1*DENDRO_88; const double DENDRO_112 = At4[pp]*DENDRO_20*DENDRO_22; const double DENDRO_113 = DENDRO_1*DENDRO_74; const double DENDRO_114 = DENDRO_105 + DENDRO_106 - DENDRO_107 + DENDRO_108 - DENDRO_109 - DENDRO_110 - DENDRO_111 + DENDRO_112 - DENDRO_113; const double DENDRO_115 = 2.0*DENDRO_114*DENDRO_48*alpha[pp]; const double DENDRO_116 = grad_1_K[pp]; const double DENDRO_117 = 4*gt1[pp]*gt5[pp] - 4*gt2[pp]*gt4[pp]; const double DENDRO_118 = 9*DENDRO_40*DENDRO_7*grad_1_chi[pp]; const double DENDRO_119 = DENDRO_1*DENDRO_64 + DENDRO_103*DENDRO_15 - DENDRO_22*DENDRO_52; const double DENDRO_120 = DENDRO_1*DENDRO_119; const double DENDRO_121 = DENDRO_1*DENDRO_100 + DENDRO_15*DENDRO_43 - DENDRO_22*DENDRO_54; const double DENDRO_122 = DENDRO_121*DENDRO_15; const double DENDRO_123 = DENDRO_1*DENDRO_66 + DENDRO_15*DENDRO_45 - DENDRO_22*DENDRO_94; const double DENDRO_124 = DENDRO_123*DENDRO_9; const double DENDRO_125 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_126 = DENDRO_125*DENDRO_62 - DENDRO_15*DENDRO_67 + DENDRO_22*DENDRO_65; const double DENDRO_127 = DENDRO_126*DENDRO_20; const double DENDRO_128 = 0.5*DENDRO_22*DENDRO_50; const double DENDRO_129 = DENDRO_1*DENDRO_53; const double DENDRO_130 = DENDRO_15*DENDRO_55; const double DENDRO_131 = DENDRO_22*(-DENDRO_128 + DENDRO_129 - DENDRO_130); const double DENDRO_132 = DENDRO_1*DENDRO_46 - DENDRO_22*DENDRO_44 + DENDRO_42*DENDRO_51; const double DENDRO_133 = DENDRO_132*DENDRO_25; const double DENDRO_134 = DENDRO_30*(DENDRO_120 + DENDRO_122 - 1.0*DENDRO_124 - DENDRO_127 - DENDRO_131 - DENDRO_133); const double DENDRO_135 = DENDRO_1*DENDRO_52 - DENDRO_103*DENDRO_9 - DENDRO_20*DENDRO_64; const double DENDRO_136 = DENDRO_1*DENDRO_135; const double DENDRO_137 = DENDRO_1*DENDRO_54 - DENDRO_100*DENDRO_20 - DENDRO_43*DENDRO_9; const double DENDRO_138 = DENDRO_137*DENDRO_15; const double DENDRO_139 = DENDRO_1*DENDRO_94 - DENDRO_20*DENDRO_66 - DENDRO_45*DENDRO_9; const double DENDRO_140 = DENDRO_139*DENDRO_9; const double DENDRO_141 = -DENDRO_1*DENDRO_65 - 0.5*DENDRO_20*DENDRO_62 + DENDRO_67*DENDRO_9; const double DENDRO_142 = DENDRO_141*DENDRO_20; const double DENDRO_143 = DENDRO_125*DENDRO_50 - DENDRO_20*DENDRO_53 + DENDRO_55*DENDRO_9; const double DENDRO_144 = DENDRO_143*DENDRO_22; const double DENDRO_145 = DENDRO_1*DENDRO_44 - DENDRO_20*DENDRO_46 - DENDRO_42*DENDRO_63; const double DENDRO_146 = DENDRO_145*DENDRO_25; const double DENDRO_147 = DENDRO_30*(DENDRO_136 + DENDRO_138 - 1.0*DENDRO_140 - DENDRO_142 - DENDRO_144 - DENDRO_146); const double DENDRO_148 = grad_0_beta0[pp]; const double DENDRO_149 = DENDRO_1*DENDRO_104; const double DENDRO_150 = DENDRO_101*DENDRO_15; const 
double DENDRO_151 = DENDRO_9*DENDRO_95; const double DENDRO_152 = DENDRO_20*DENDRO_68; const double DENDRO_153 = DENDRO_22*DENDRO_56; const double DENDRO_154 = DENDRO_25*DENDRO_47; const double DENDRO_155 = DENDRO_30*(DENDRO_149 + DENDRO_150 - 1.0*DENDRO_151 - DENDRO_152 - DENDRO_153 - DENDRO_154); const double DENDRO_156 = grad_1_beta1[pp]; const double DENDRO_157 = grad_2_beta2[pp]; const double DENDRO_158 = (2.0L/3.0L)*DENDRO_148 + (2.0L/3.0L)*DENDRO_156 + (2.0L/3.0L)*DENDRO_157; const double DENDRO_159 = (1.0L/3.0L)*DENDRO_1*DENDRO_7; const double DENDRO_160 = (7.0L/3.0L)*DENDRO_1*DENDRO_7; const double DENDRO_161 = (1.0L/3.0L)*DENDRO_22*DENDRO_7; const double DENDRO_162 = -DENDRO_105 - DENDRO_106 + DENDRO_107 - DENDRO_108 + DENDRO_109 + DENDRO_110 + DENDRO_111 - DENDRO_112 + DENDRO_113; const double DENDRO_163 = -DENDRO_79 - DENDRO_80 + DENDRO_81 - DENDRO_83 + DENDRO_84 + DENDRO_85 - DENDRO_86 + DENDRO_87 + DENDRO_89; const double DENDRO_164 = 2.0*DENDRO_48*alpha[pp]; const double DENDRO_165 = 4*gt0[pp]*gt4[pp] - 4*gt1[pp]*gt2[pp]; const double DENDRO_166 = DENDRO_30*(-1.0*DENDRO_120 - 1.0*DENDRO_122 + DENDRO_124 + DENDRO_127 + DENDRO_131 + DENDRO_133); const double DENDRO_167 = (1.0L/3.0L)*DENDRO_20*DENDRO_7; // Dendro: printing variables Gt_rhs0[pp] = 2*DENDRO_1*DENDRO_7*grad2_1_2_beta0[pp] - DENDRO_10*DENDRO_8 + DENDRO_101*DENDRO_102 + DENDRO_104*DENDRO_115 - DENDRO_11*DENDRO_12 - DENDRO_12*DENDRO_13 - DENDRO_134*grad_1_beta0[pp] + DENDRO_14*DENDRO_16 - DENDRO_147*grad_2_beta0[pp] - DENDRO_148*DENDRO_155 + DENDRO_155*DENDRO_158 + DENDRO_17*DENDRO_18 + DENDRO_18*DENDRO_19 - DENDRO_21*grad2_2_2_beta0[pp] - DENDRO_23*grad2_1_1_beta0[pp] - 4.0L/3.0L*DENDRO_24*DENDRO_26 - DENDRO_27*DENDRO_28 - DENDRO_28*DENDRO_29 - DENDRO_31*DENDRO_37 - DENDRO_38*(DENDRO_116*DENDRO_117 + DENDRO_118*DENDRO_90) - DENDRO_38*(-4*DENDRO_25*DENDRO_39 + DENDRO_37*DENDRO_41) - DENDRO_38*(DENDRO_77*DENDRO_99 - DENDRO_97*DENDRO_98) + DENDRO_47*DENDRO_49 + DENDRO_56*DENDRO_61 + DENDRO_68*DENDRO_71 - DENDRO_72*DENDRO_77 - DENDRO_78*DENDRO_90 + DENDRO_95*DENDRO_96 + beta0[pp]*agrad_0_Gt0[pp] + beta1[pp]*agrad_1_Gt0[pp] + beta2[pp]*agrad_2_Gt0[pp]; Gt_rhs1[pp] = DENDRO_11*DENDRO_160 - DENDRO_119*DENDRO_162*DENDRO_164 - DENDRO_121*DENDRO_163*DENDRO_164 + DENDRO_123*DENDRO_96 + DENDRO_126*DENDRO_71 + DENDRO_13*DENDRO_159 + DENDRO_132*DENDRO_49 - DENDRO_14*DENDRO_161 + DENDRO_156*DENDRO_166 - DENDRO_158*DENDRO_166 + DENDRO_159*DENDRO_8 + DENDRO_16*DENDRO_27 - DENDRO_161*DENDRO_19 + DENDRO_162*DENDRO_72 + DENDRO_163*DENDRO_31 - 4.0L/3.0L*DENDRO_17*DENDRO_23 + DENDRO_18*DENDRO_24 + DENDRO_18*DENDRO_29 - DENDRO_21*grad2_2_2_beta1[pp] - DENDRO_26*grad2_0_0_beta1[pp] + DENDRO_30*(-1.0*DENDRO_136 - 1.0*DENDRO_138 + DENDRO_140 + DENDRO_142 + DENDRO_144 + DENDRO_146)*grad_2_beta1[pp] + DENDRO_30*(-1.0*DENDRO_149 - 1.0*DENDRO_150 + DENDRO_151 + DENDRO_152 + DENDRO_153 + DENDRO_154)*grad_0_beta1[pp] + DENDRO_38*(4*DENDRO_116*DENDRO_22 - DENDRO_118*DENDRO_60) - DENDRO_38*(DENDRO_117*DENDRO_39 - DENDRO_163*DENDRO_41) - DENDRO_38*(-DENDRO_162*DENDRO_99 + DENDRO_165*DENDRO_97) - DENDRO_60*DENDRO_78 - DENDRO_61*(DENDRO_128 - DENDRO_129 + DENDRO_130) - 2*DENDRO_7*DENDRO_9*grad2_0_2_beta1[pp] + beta0[pp]*agrad_0_Gt1[pp] + beta1[pp]*agrad_1_Gt1[pp] + beta2[pp]*agrad_2_Gt1[pp]; Gt_rhs2[pp] = -DENDRO_10*DENDRO_29 + DENDRO_102*DENDRO_137 - DENDRO_11*DENDRO_167 - DENDRO_114*DENDRO_78 + DENDRO_115*DENDRO_135 - DENDRO_12*DENDRO_24 - DENDRO_12*DENDRO_27 - 4.0L/3.0L*DENDRO_13*DENDRO_21 - DENDRO_134*grad_1_beta2[pp] + DENDRO_139*DENDRO_96 + 
DENDRO_14*DENDRO_159 + DENDRO_141*DENDRO_71 + DENDRO_143*DENDRO_61 + DENDRO_145*DENDRO_49 - DENDRO_147*DENDRO_157 + DENDRO_147*DENDRO_158 + 2*DENDRO_15*DENDRO_7*grad2_0_1_beta2[pp] - DENDRO_155*grad_0_beta2[pp] + DENDRO_159*DENDRO_17 + DENDRO_160*DENDRO_19 - DENDRO_167*DENDRO_8 - DENDRO_23*grad2_1_1_beta2[pp] - DENDRO_26*grad2_0_0_beta2[pp] - DENDRO_31*DENDRO_77 - DENDRO_38*(DENDRO_114*DENDRO_118 + DENDRO_116*DENDRO_165) - DENDRO_38*(-4*DENDRO_20*DENDRO_97 + DENDRO_70*DENDRO_99) - DENDRO_38*(-DENDRO_39*DENDRO_98 + DENDRO_41*DENDRO_77) - DENDRO_70*DENDRO_72 + beta0[pp]*agrad_0_Gt2[pp] + beta1[pp]*agrad_1_Gt2[pp] + beta2[pp]*agrad_2_Gt2[pp]; // Dendro: reduced ops: 732 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(Gt_rhs0, &__unzipOutVar[cuda::VAR::U_GT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(Gt_rhs2, &__unzipOutVar[cuda::VAR::U_GT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(Gt_rhs1, &__unzipOutVar[cuda::VAR::U_GT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_Gt_rhs /**@brief compute B_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_B_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for B_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* 
hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 27; double * gt3 = __sm_base + 54; double * At1 = __sm_base + 81; double * gt5 = __sm_base + 108; double * gt4 = __sm_base + 135; double * alpha = __sm_base + 162; double * gt2 = __sm_base + 189; double * At3 = __sm_base + 216; double * beta2 = __sm_base + 243; double * B2 = __sm_base + 270; double * At4 = __sm_base + 297; double * At0 = __sm_base + 324; double * At2 = __sm_base + 351; double * beta0 = __sm_base + 378; double * gt0 = __sm_base + 405; double * chi = __sm_base + 432; double * B1 = __sm_base + 459; double * B0 = __sm_base + 486; double * At5 = __sm_base + 513; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad_0_gt5 = __sm_base + 540; double * grad_1_gt0 = __sm_base + 567; double * agrad_0_B1 = __sm_base + 594; double * grad2_1_2_beta2 = __sm_base + 621; double * grad_2_K = __sm_base + 648; double * grad_0_beta0 = __sm_base + 675; double * grad_2_gt0 = __sm_base + 702; double * grad_0_gt4 = __sm_base + 729; double * grad2_1_1_beta1 = __sm_base + 756; double * grad_2_gt3 = __sm_base + 783; double * grad2_0_1_beta0 = __sm_base + 810; double * grad2_0_0_beta1 = __sm_base + 837; double * grad_1_K = __sm_base + 864; double * grad_1_alpha = __sm_base + 891; double * grad2_0_2_beta1 = __sm_base + 918; double * grad2_0_1_beta1 = __sm_base + 945; double * grad2_2_2_beta2 = __sm_base + 972; double * grad_1_beta1 = __sm_base + 999; double * grad_2_alpha = __sm_base + 1026; double * grad_2_beta0 = __sm_base + 1053; double * grad_0_gt1 = __sm_base + 1080; double * grad_1_gt5 = __sm_base + 1107; double * agrad_2_B0 = __sm_base + 1134; double * agrad_2_Gt1 = __sm_base + 1161; double * agrad_1_B1 = __sm_base + 1188; double * agrad_0_Gt2 = __sm_base + 1215; double * grad2_1_1_beta2 = __sm_base + 1242; double * grad_1_gt4 = __sm_base + 1269; double * grad2_2_2_beta1 = __sm_base + 1296; double * grad_1_gt1 = __sm_base + 1323; double * agrad_2_B1 = __sm_base + 1350; double * grad_2_gt4 = __sm_base + 1377; double * grad_0_beta1 = __sm_base + 1404; double * agrad_1_B0 = __sm_base + 1431; double * grad_0_alpha = __sm_base + 1458; double * agrad_0_B0 = __sm_base + 1485; double * grad_0_chi = __sm_base + 1512; double * grad_2_beta2 = __sm_base + 1539; double * grad2_1_2_beta0 = __sm_base + 1566; double * grad2_1_1_beta0 = __sm_base + 1593; double * agrad_0_Gt1 = __sm_base + 1620; double * grad_0_K = __sm_base + 1647; double * agrad_1_B2 = __sm_base + 1674; double * grad2_0_2_beta2 = __sm_base + 1701; double * agrad_2_B2 = __sm_base + 1728; double * agrad_0_Gt0 = __sm_base + 1755; double * agrad_1_Gt1 = __sm_base + 1782; double * grad2_0_0_beta0 = __sm_base + 1809; double * agrad_1_Gt2 = __sm_base + 1836; double * agrad_1_Gt0 = __sm_base + 1863; double * grad_2_gt2 = __sm_base + 1890; double * grad_1_chi = __sm_base + 1917; double * grad_0_gt0 = __sm_base + 1944; double * agrad_0_B2 = __sm_base + 1971; double * grad_0_gt3 = __sm_base + 1998; double * grad2_2_2_beta0 = __sm_base + 2025; double * agrad_2_Gt2 = __sm_base + 2052; double * grad_2_beta1 = __sm_base + 2079; double * grad_2_gt5 = __sm_base + 2106; double * grad_1_beta0 = __sm_base + 2133; double * grad2_0_1_beta2 = __sm_base + 2160; double * grad_1_gt3 = __sm_base + 2187; double 
* grad2_0_2_beta0 = __sm_base + 2214; double * grad_2_chi = __sm_base + 2241; double * grad_2_gt1 = __sm_base + 2268; double * agrad_2_Gt0 = __sm_base + 2295; double * grad_0_gt2 = __sm_base + 2322; double * grad_0_beta2 = __sm_base + 2349; double * grad_1_beta2 = __sm_base + 2376; double * grad2_0_0_beta2 = __sm_base + 2403; double * grad_1_gt2 = __sm_base + 2430; double * grad2_1_2_beta1 = __sm_base + 2457; // deriv vars end // output vars begin double * B_rhs1 = __sm_base + 2484; double * B_rhs0 = __sm_base + 2511; double * B_rhs2 = __sm_base + 2538; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
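/*
 * Note on the staging pattern used throughout this section: every evolved BSSN field is
 * copied from its unzipped global array into a fixed slot in dynamic shared memory
 * (__sm_base + offset) before the point-wise update. A minimal sketch of the index map
 * assumed for cuda::__loadGlobalToShared3D (illustration only -- the real helper is
 * defined elsewhere in this codebase):
 *
 *   for every (i,j,k) covered by ijk_lm, cooperatively across the thread block:
 *     shared[k*tile_sz[1]*tile_sz[0] + j*tile_sz[0] + i] =
 *         global[(k+ijk_lm[4])*alignedSz[1]*alignedSz[0] + (j+ijk_lm[2])*alignedSz[0] + (i+ijk_lm[0])];
 *
 * The __syncthreads() after the last load guarantees the full tile is resident before any
 * thread begins consuming it.
 */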
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
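/*
 * Workspace indexing: every precomputed derivative is fetched from __derivWorkspace at
 * base + stream_id*(__szPerStream) + SM_ID*(__maxBlkSz). Each CUDA stream therefore owns a
 * disjoint slab of the derivative workspace, and inside a stream each SM (identified via
 * get_smid()) uses its own block-sized scratch region, so blocks resident on different SMs
 * do not alias each other's buffers. (Interpretation of the generated indexing; the
 * allocation itself is owned by MemoryDerivs.)
 */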
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const 
unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double 
*) agrad_1_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
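/*
 * Naming convention assumed for the buffers loaded in this section: grad_i_<var> holds the
 * first derivative of <var> along direction i, grad2_i_j_beta<k> holds the mixed second
 * derivative of the shift component, and agrad_i_<var> holds the advective (typically
 * upwinded) derivative that is only ever consumed multiplied by beta^i inside the
 * lambda[2]/lambda[3] advection terms of the B_rhs expressions below.
 */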
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) 
ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 17226 // Dendro: printing temp variables const double DENDRO_0 = beta0[pp]*agrad_0_Gt0[pp] + beta1[pp]*agrad_1_Gt0[pp] + beta2[pp]*agrad_2_Gt0[pp]; const double DENDRO_1 = 2*gt0[pp]*gt4[pp] - 2*gt1[pp]*gt2[pp]; const double DENDRO_2 = pow(gt4[pp], 2); const double DENDRO_3 = pow(gt1[pp], 2); const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = gt0[pp]*gt3[pp]; const double DENDRO_6 = gt1[pp]*gt2[pp]; const double DENDRO_7 = DENDRO_2*gt0[pp] + DENDRO_3*gt5[pp] + DENDRO_4*gt3[pp] - DENDRO_5*gt5[pp] - 2*DENDRO_6*gt4[pp]; const double DENDRO_8 = 1.0/DENDRO_7; const double DENDRO_9 = grad2_0_2_beta0[pp]; const double DENDRO_10 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_11 = (7.0L/3.0L)*DENDRO_10*DENDRO_8; const double DENDRO_12 = grad2_1_2_beta1[pp]; const double DENDRO_13 = (1.0L/3.0L)*DENDRO_10*DENDRO_8; const double DENDRO_14 = grad2_2_2_beta2[pp]; const double DENDRO_15 = grad2_0_1_beta0[pp]; const double DENDRO_16 = gt1[pp]*gt5[pp] - gt2[pp]*gt4[pp]; const double DENDRO_17 = (7.0L/3.0L)*DENDRO_16*DENDRO_8; const double DENDRO_18 = grad2_1_1_beta1[pp]; const double DENDRO_19 = (1.0L/3.0L)*DENDRO_16*DENDRO_8; const double DENDRO_20 = grad2_1_2_beta2[pp]; const double DENDRO_21 = -DENDRO_3 + DENDRO_5; const double DENDRO_22 = DENDRO_21*DENDRO_8; const double DENDRO_23 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_24 = DENDRO_23*DENDRO_8; const double DENDRO_25 = grad2_0_0_beta0[pp]; const double DENDRO_26 = -DENDRO_2 + gt3[pp]*gt5[pp]; const double DENDRO_27 = DENDRO_26*DENDRO_8; const double DENDRO_28 = grad2_0_1_beta1[pp]; const double DENDRO_29 = (1.0L/3.0L)*DENDRO_26*DENDRO_8; const double DENDRO_30 = grad2_0_2_beta2[pp]; const double DENDRO_31 = pow(DENDRO_7, -2); const double DENDRO_32 = 2*DENDRO_31*grad_0_alpha[pp]; const double 
DENDRO_33 = pow(DENDRO_16, 2); const double DENDRO_34 = pow(DENDRO_10, 2); const double DENDRO_35 = 2*gt1[pp]*gt5[pp] - 2*gt2[pp]*gt4[pp]; const double DENDRO_36 = 2*gt1[pp]*gt4[pp] - 2*gt2[pp]*gt3[pp]; const double DENDRO_37 = At0[pp]*pow(DENDRO_26, 2) - At1[pp]*DENDRO_26*DENDRO_35 + At2[pp]*DENDRO_26*DENDRO_36 + At3[pp]*DENDRO_33 - At4[pp]*DENDRO_10*DENDRO_35 + At5[pp]*DENDRO_34; const double DENDRO_38 = grad_2_chi[pp]; const double DENDRO_39 = grad_1_chi[pp]; const double DENDRO_40 = grad_0_chi[pp]; const double DENDRO_41 = 2*DENDRO_38; const double DENDRO_42 = -DENDRO_6 + gt0[pp]*gt4[pp]; const double DENDRO_43 = R0*sqrt(DENDRO_8*(-DENDRO_10*DENDRO_40*DENDRO_41 - DENDRO_21*pow(DENDRO_38, 2) - DENDRO_23*pow(DENDRO_39, 2) - DENDRO_26*pow(DENDRO_40, 2) + DENDRO_35*DENDRO_39*DENDRO_40 + DENDRO_39*DENDRO_41*DENDRO_42))*pow(-pow(chi[pp], eta_power[0]) + 1, -eta_power[1]); const double DENDRO_44 = (1.0L/3.0L)*DENDRO_8*alpha[pp]; const double DENDRO_45 = grad_0_K[pp]; const double DENDRO_46 = 1.0/chi[pp]; const double DENDRO_47 = 9*DENDRO_40*DENDRO_46*DENDRO_8; const double DENDRO_48 = grad_0_gt0[pp]; const double DENDRO_49 = grad_1_gt0[pp]; const double DENDRO_50 = -0.5*DENDRO_49 + 1.0*grad_0_gt1[pp]; const double DENDRO_51 = grad_2_gt0[pp]; const double DENDRO_52 = -0.5*DENDRO_51 + 1.0*grad_0_gt2[pp]; const double DENDRO_53 = -DENDRO_10*DENDRO_52 + DENDRO_16*DENDRO_50 - 0.5*DENDRO_26*DENDRO_48; const double DENDRO_54 = pow(DENDRO_7, -3); const double DENDRO_55 = 2*DENDRO_37*DENDRO_54*alpha[pp]; const double DENDRO_56 = grad_1_gt3[pp]; const double DENDRO_57 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_58 = grad_2_gt3[pp]; const double DENDRO_59 = -0.5*DENDRO_58 + 1.0*grad_1_gt4[pp]; const double DENDRO_60 = grad_0_gt3[pp]; const double DENDRO_61 = 0.5*DENDRO_60 - 1.0*grad_1_gt1[pp]; const double DENDRO_62 = -DENDRO_10*DENDRO_59 + DENDRO_26*DENDRO_61 + DENDRO_56*DENDRO_57; const double DENDRO_63 = pow(DENDRO_42, 2); const double DENDRO_64 = At1[pp]*DENDRO_23; const double DENDRO_65 = At0[pp]*DENDRO_33 + At2[pp]*DENDRO_35*DENDRO_42 + At3[pp]*pow(DENDRO_23, 2) - At4[pp]*DENDRO_1*DENDRO_23 + At5[pp]*DENDRO_63 - DENDRO_35*DENDRO_64; const double DENDRO_66 = 2*DENDRO_54*DENDRO_65*alpha[pp]; const double DENDRO_67 = grad_2_gt5[pp]; const double DENDRO_68 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_69 = grad_1_gt5[pp]; const double DENDRO_70 = 0.5*DENDRO_69 - 1.0*grad_2_gt4[pp]; const double DENDRO_71 = grad_0_gt5[pp]; const double DENDRO_72 = 0.5*DENDRO_71 - 1.0*grad_2_gt2[pp]; const double DENDRO_73 = -DENDRO_16*DENDRO_70 + DENDRO_26*DENDRO_72 - DENDRO_67*DENDRO_68; const double DENDRO_74 = DENDRO_10*DENDRO_42; const double DENDRO_75 = At2[pp]*DENDRO_21; const double DENDRO_76 = At4[pp]*DENDRO_21; const double DENDRO_77 = At0[pp]*DENDRO_34 - 2*At1[pp]*DENDRO_74 + At3[pp]*DENDRO_63 + At5[pp]*pow(DENDRO_21, 2) - DENDRO_1*DENDRO_76 + DENDRO_36*DENDRO_75; const double DENDRO_78 = 2*DENDRO_54*DENDRO_77*alpha[pp]; const double DENDRO_79 = 2*DENDRO_31*grad_2_alpha[pp]; const double DENDRO_80 = DENDRO_16*DENDRO_42; const double DENDRO_81 = At0[pp]*DENDRO_26; const double DENDRO_82 = DENDRO_10*DENDRO_16; const double DENDRO_83 = At5[pp]*DENDRO_21; const double DENDRO_84 = DENDRO_26*DENDRO_42; const double DENDRO_85 = DENDRO_16*DENDRO_21; const double DENDRO_86 = -At1[pp]*DENDRO_82 - At1[pp]*DENDRO_84 + At2[pp]*DENDRO_34 + At3[pp]*DENDRO_80 - At4[pp]*DENDRO_74 - At4[pp]*DENDRO_85 + DENDRO_10*DENDRO_81 + DENDRO_10*DENDRO_83 + DENDRO_26*DENDRO_75; const double 
DENDRO_87 = 2*DENDRO_31*grad_1_alpha[pp]; const double DENDRO_88 = DENDRO_10*DENDRO_23; const double DENDRO_89 = At3[pp]*DENDRO_23; const double DENDRO_90 = At1[pp]*DENDRO_33 - At2[pp]*DENDRO_82 - At2[pp]*DENDRO_84 + At4[pp]*DENDRO_80 + At4[pp]*DENDRO_88 - At5[pp]*DENDRO_74 - DENDRO_16*DENDRO_81 - DENDRO_16*DENDRO_89 + DENDRO_26*DENDRO_64; const double DENDRO_91 = grad_0_gt4[pp]; const double DENDRO_92 = grad_2_gt1[pp]; const double DENDRO_93 = grad_1_gt2[pp]; const double DENDRO_94 = DENDRO_91 + DENDRO_92 - DENDRO_93; const double DENDRO_95 = -DENDRO_10*DENDRO_71 + DENDRO_16*DENDRO_94 - DENDRO_26*DENDRO_51; const double DENDRO_96 = 2.0*DENDRO_54*DENDRO_86*alpha[pp]; const double DENDRO_97 = grad_2_K[pp]; const double DENDRO_98 = 4*gt1[pp]*gt4[pp] - 4*gt2[pp]*gt3[pp]; const double DENDRO_99 = 9*DENDRO_38*DENDRO_46*DENDRO_8; const double DENDRO_100 = DENDRO_91 - DENDRO_92 + DENDRO_93; const double DENDRO_101 = -DENDRO_10*DENDRO_100 + DENDRO_16*DENDRO_60 - DENDRO_26*DENDRO_49; const double DENDRO_102 = 2.0*DENDRO_54*DENDRO_90*alpha[pp]; const double DENDRO_103 = -DENDRO_91 + DENDRO_92 + DENDRO_93; const double DENDRO_104 = -DENDRO_10*DENDRO_69 - DENDRO_103*DENDRO_26 + DENDRO_16*DENDRO_58; const double DENDRO_105 = -At0[pp]*DENDRO_82 + At1[pp]*DENDRO_80 + At1[pp]*DENDRO_88 - At2[pp]*DENDRO_74 - At2[pp]*DENDRO_85 + At4[pp]*DENDRO_63 + DENDRO_23*DENDRO_76 - DENDRO_42*DENDRO_83 - DENDRO_42*DENDRO_89; const double DENDRO_106 = 2.0*DENDRO_105*DENDRO_54*alpha[pp]; const double DENDRO_107 = grad_1_K[pp]; const double DENDRO_108 = 4*gt1[pp]*gt5[pp] - 4*gt2[pp]*gt4[pp]; const double DENDRO_109 = 9*DENDRO_39*DENDRO_46*DENDRO_8; const double DENDRO_110 = DENDRO_103*DENDRO_16 - DENDRO_23*DENDRO_58 + DENDRO_42*DENDRO_69; const double DENDRO_111 = DENDRO_100*DENDRO_42 + DENDRO_16*DENDRO_49 - DENDRO_23*DENDRO_60; const double DENDRO_112 = 1.0*gt1[pp]*gt4[pp] - 1.0*gt2[pp]*gt3[pp]; const double DENDRO_113 = DENDRO_16*DENDRO_51 - DENDRO_23*DENDRO_94 + DENDRO_42*DENDRO_71; const double DENDRO_114 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_115 = DENDRO_114*DENDRO_67 - DENDRO_16*DENDRO_72 + DENDRO_23*DENDRO_70; const double DENDRO_116 = -DENDRO_16*DENDRO_61 - 0.5*DENDRO_23*DENDRO_56 + DENDRO_42*DENDRO_59; const double DENDRO_117 = -DENDRO_23*DENDRO_50 + DENDRO_42*DENDRO_52 + DENDRO_48*DENDRO_57; const double DENDRO_118 = DENDRO_31*(DENDRO_110*DENDRO_42 + DENDRO_111*DENDRO_16 - DENDRO_112*DENDRO_113 - DENDRO_115*DENDRO_21 - DENDRO_116*DENDRO_23 - DENDRO_117*DENDRO_26); const double DENDRO_119 = -DENDRO_10*DENDRO_103 - DENDRO_21*DENDRO_69 + DENDRO_42*DENDRO_58; const double DENDRO_120 = -DENDRO_10*DENDRO_49 - DENDRO_100*DENDRO_21 + DENDRO_42*DENDRO_60; const double DENDRO_121 = -DENDRO_10*DENDRO_51 - DENDRO_21*DENDRO_71 + DENDRO_42*DENDRO_94; const double DENDRO_122 = DENDRO_10*DENDRO_72 - 0.5*DENDRO_21*DENDRO_67 - DENDRO_42*DENDRO_70; const double DENDRO_123 = DENDRO_10*DENDRO_61 + DENDRO_114*DENDRO_56 - DENDRO_21*DENDRO_59; const double DENDRO_124 = -DENDRO_21*DENDRO_52 + DENDRO_42*DENDRO_50 - DENDRO_48*DENDRO_68; const double DENDRO_125 = DENDRO_31*(-DENDRO_112*DENDRO_121 + DENDRO_119*DENDRO_42 + DENDRO_120*DENDRO_16 - DENDRO_122*DENDRO_21 - DENDRO_123*DENDRO_23 - DENDRO_124*DENDRO_26); const double DENDRO_126 = grad_0_beta0[pp]; const double DENDRO_127 = DENDRO_31*(DENDRO_101*DENDRO_16 + DENDRO_104*DENDRO_42 - DENDRO_112*DENDRO_95 - DENDRO_21*DENDRO_73 - DENDRO_23*DENDRO_62 - DENDRO_26*DENDRO_53); const double DENDRO_128 = grad_1_beta1[pp]; const double DENDRO_129 = grad_2_beta2[pp]; 
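/*
 * Interpretation (not part of the Dendro-generated output): the B_rhs expressions printed
 * below implement the standard Gamma-driver shift condition
 *
 *   d_t B^i = d_t Gt^i - lambda[3] * beta^j d_j Gt^i + lambda[2] * beta^j d_j B^i - eta * B^i,
 *
 * where DENDRO_0, DENDRO_131 and DENDRO_136 carry the advected beta^j d_j Gt^i pieces and
 * DENDRO_43 is the position-dependent damping eta built from R0, the gradient of the
 * conformal factor chi, and the eta_power exponents.
 */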
const double DENDRO_130 = (2.0L/3.0L)*DENDRO_126 + (2.0L/3.0L)*DENDRO_128 + (2.0L/3.0L)*DENDRO_129; const double DENDRO_131 = beta0[pp]*agrad_0_Gt1[pp] + beta1[pp]*agrad_1_Gt1[pp] + beta2[pp]*agrad_2_Gt1[pp]; const double DENDRO_132 = (1.0L/3.0L)*DENDRO_42*DENDRO_8; const double DENDRO_133 = (7.0L/3.0L)*DENDRO_42*DENDRO_8; const double DENDRO_134 = (1.0L/3.0L)*DENDRO_23*DENDRO_8; const double DENDRO_135 = 4*gt0[pp]*gt4[pp] - 4*gt1[pp]*gt2[pp]; const double DENDRO_136 = beta0[pp]*agrad_0_Gt2[pp] + beta1[pp]*agrad_1_Gt2[pp] + beta2[pp]*agrad_2_Gt2[pp]; const double DENDRO_137 = (1.0L/3.0L)*DENDRO_21*DENDRO_8; // Dendro: printing variables B_rhs0[pp] = -B0[pp]*DENDRO_43 - DENDRO_0*lambda[3] + DENDRO_0 + DENDRO_1*DENDRO_8*grad2_1_2_beta0[pp] + DENDRO_101*DENDRO_102 + DENDRO_104*DENDRO_106 - DENDRO_11*DENDRO_9 - DENDRO_118*grad_1_beta0[pp] - DENDRO_12*DENDRO_13 - DENDRO_125*grad_2_beta0[pp] - DENDRO_126*DENDRO_127 + DENDRO_127*DENDRO_130 - DENDRO_13*DENDRO_14 + DENDRO_15*DENDRO_17 + DENDRO_18*DENDRO_19 + DENDRO_19*DENDRO_20 - DENDRO_22*grad2_2_2_beta0[pp] - DENDRO_24*grad2_1_1_beta0[pp] - 4.0L/3.0L*DENDRO_25*DENDRO_27 - DENDRO_28*DENDRO_29 - DENDRO_29*DENDRO_30 - DENDRO_32*DENDRO_37 - DENDRO_44*(DENDRO_107*DENDRO_108 + DENDRO_109*DENDRO_90) - DENDRO_44*(-4*DENDRO_26*DENDRO_45 + DENDRO_37*DENDRO_47) - DENDRO_44*(DENDRO_86*DENDRO_99 - DENDRO_97*DENDRO_98) + DENDRO_53*DENDRO_55 + DENDRO_62*DENDRO_66 + DENDRO_73*DENDRO_78 - DENDRO_79*DENDRO_86 - DENDRO_87*DENDRO_90 + DENDRO_95*DENDRO_96 + lambda[2]*(beta0[pp]*agrad_0_B0[pp] + beta1[pp]*agrad_1_B0[pp] + beta2[pp]*agrad_2_B0[pp]); B_rhs1[pp] = -B1[pp]*DENDRO_43 + DENDRO_102*DENDRO_111 - DENDRO_105*DENDRO_79 + DENDRO_106*DENDRO_110 + DENDRO_113*DENDRO_96 + DENDRO_115*DENDRO_78 + DENDRO_116*DENDRO_66 + DENDRO_117*DENDRO_55 - DENDRO_118*DENDRO_128 + DENDRO_118*DENDRO_130 + DENDRO_12*DENDRO_133 - DENDRO_125*grad_2_beta1[pp] - DENDRO_127*grad_0_beta1[pp] - DENDRO_131*lambda[3] + DENDRO_131 + DENDRO_132*DENDRO_14 + DENDRO_132*DENDRO_9 - DENDRO_134*DENDRO_15 - DENDRO_134*DENDRO_20 + DENDRO_17*DENDRO_28 - 4.0L/3.0L*DENDRO_18*DENDRO_24 + DENDRO_19*DENDRO_25 + DENDRO_19*DENDRO_30 - DENDRO_22*grad2_2_2_beta1[pp] - DENDRO_27*grad2_0_0_beta1[pp] - DENDRO_32*DENDRO_90 - DENDRO_36*DENDRO_8*grad2_0_2_beta1[pp] - DENDRO_44*(DENDRO_105*DENDRO_99 + DENDRO_135*DENDRO_97) - DENDRO_44*(-4*DENDRO_107*DENDRO_23 + DENDRO_109*DENDRO_65) - DENDRO_44*(DENDRO_108*DENDRO_45 + DENDRO_47*DENDRO_90) - DENDRO_65*DENDRO_87 + lambda[2]*(beta0[pp]*agrad_0_B1[pp] + beta1[pp]*agrad_1_B1[pp] + beta2[pp]*agrad_2_B1[pp]); B_rhs2[pp] = -B2[pp]*DENDRO_43 + DENDRO_102*DENDRO_120 - DENDRO_105*DENDRO_87 + DENDRO_106*DENDRO_119 - DENDRO_11*DENDRO_30 - DENDRO_118*grad_1_beta2[pp] - DENDRO_12*DENDRO_137 + DENDRO_121*DENDRO_96 + DENDRO_122*DENDRO_78 + DENDRO_123*DENDRO_66 + DENDRO_124*DENDRO_55 - DENDRO_125*DENDRO_129 + DENDRO_125*DENDRO_130 - DENDRO_127*grad_0_beta2[pp] - DENDRO_13*DENDRO_25 - DENDRO_13*DENDRO_28 + DENDRO_132*DENDRO_15 + DENDRO_132*DENDRO_18 + DENDRO_133*DENDRO_20 - DENDRO_136*lambda[3] + DENDRO_136 - DENDRO_137*DENDRO_9 - 4.0L/3.0L*DENDRO_14*DENDRO_22 - DENDRO_24*grad2_1_1_beta2[pp] - DENDRO_27*grad2_0_0_beta2[pp] - DENDRO_32*DENDRO_86 + DENDRO_35*DENDRO_8*grad2_0_1_beta2[pp] - DENDRO_44*(DENDRO_105*DENDRO_109 + DENDRO_107*DENDRO_135) - DENDRO_44*(-4*DENDRO_21*DENDRO_97 + DENDRO_77*DENDRO_99) - DENDRO_44*(-DENDRO_45*DENDRO_98 + DENDRO_47*DENDRO_86) - DENDRO_77*DENDRO_79 + lambda[2]*(beta0[pp]*agrad_0_B2[pp] + beta1[pp]*agrad_1_B2[pp] + beta2[pp]*agrad_2_B2[pp]); // Dendro: reduced 
ops: 765 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(B_rhs1, &__unzipOutVar[cuda::VAR::U_B1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(B_rhs0, &__unzipOutVar[cuda::VAR::U_B0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(B_rhs2, &__unzipOutVar[cuda::VAR::U_B2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_B_rhs /**@brief apply KO dissipation @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __ko_dissipation(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const hipDeviceProp_t* __deviceProperties, double* __sm_base, unsigned int stream_id){ // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={10,10,10}; double * kograd_0 = __sm_base + 0; double * kograd_1 = __sm_base + 1000; double * kograd_2 = __sm_base + 2000; double * unZipSharedOut = __sm_base + 3000; const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); unsigned int pp; //ko dissipation for variable alpha cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_ALPHA][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_ALPHA][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
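/*
 * Kreiss-Oliger dissipation pattern, repeated verbatim for every evolved variable in this
 * function (this note sits inside the beta0 block, which continues directly below): the
 * three precomputed KO derivatives are staged into kograd_0/1/2, the already-computed RHS
 * tile is loaded into unZipSharedOut, and each grid point is updated as
 *
 *   unZipSharedOut[pp] += kosigma * (kograd_0[pp] + kograd_1[pp] + kograd_2[pp]);
 *
 * before being written back with __storeSharedToGlobal3D. Note that this pass uses a
 * 10x10x10 tile (shared offsets 0/1000/2000/3000) instead of the 3x3x3 tile of the RHS
 * helpers above, presumably because only these four shared buffers are needed here.
 */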
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
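// Shared-memory layout for the dissipation pass: kograd_0, kograd_1 and kograd_2 each occupy one 10x10x10 tile (1000 doubles) of __sm_base, and unZipSharedOut the next 1000 doubles, so the same 4000-double window is reused for each variable.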
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt3 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
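// Workspace indexing: stream_id*__szPerStream selects the slab owned by this stream and SM_ID*__maxBlkSz the scratch region of the SM reported by get_smid(), keeping blocks resident on different SMs in separate derivative buffers.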
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt4 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt5 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable chi cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_CHI][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_CHI][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) 
alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At3 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At4 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
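// The RHS already produced by the __compute_*_rhs passes is loaded back into unZipSharedOut here, since the Kreiss-Oliger term is accumulated on top of it rather than overwriting it.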
cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At5 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable K cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_K][offset],(double *) unZipSharedOut,(const unsigned 
int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_K][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); 
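// Threads whose (x,y) index lies beyond the extent of the current tile take no part in the accumulation; the guard below compares threadIdx.x and threadIdx.y against the tile widths ijk_lm[1]-ijk_lm[0] and ijk_lm[3]-ijk_lm[2].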
if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ 
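// pp addresses this thread's (x,y) column of the 10x10x10 shared tile; each iteration of the k loop advances pp by one z-plane (tile_sz[0]*tile_sz[1] entries) and adds kosigma*(kograd_0 + kograd_1 + kograd_2) to the stored RHS value.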
pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int 
k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // store computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z }// end of function __ko_dissipation }// end of namespace cuda
6c36ae4398ffe9cf6d6dc7d79e932a4e40643088.cu
// generated by Dendro-GR SymPyGR code gernation framework //date: 2018-10-14 00:09:24 #include "rhs_bssn.cuh" namespace cuda { /**@brief compute RHS @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __global__ void __computeBSSNRHS(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* __dendroBlkList, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams ,const cudaDeviceProp* __deviceProperties, unsigned int stream_id){ // shared memory allocation for deriv and rhs computation __shared__ double __sm_base[5248]; __shared__ bool beta0_bool[1728]; __shared__ bool beta1_bool[1728]; __shared__ bool beta2_bool[1728]; for(unsigned int blk=__gpuBlockMap[2*blockIdx.x];blk<__gpuBlockMap[2*blockIdx.x+1];++blk){ // blocks assigned to each gpu block const _Block * dblock=&__dendroBlkList[blk]; // compute the derivatives __compute_derivatives(__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,beta0_bool,beta1_bool,beta2_bool,stream_id); __syncthreads(); // compute the RHS __compute_a_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_b_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_gt_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_chi_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_At_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_K_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_Gt_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __compute_B_rhs(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); __ko_dissipation(__unzipOutVar,__unzipInVar,__derivWorkspace,dblock,__gpuBlockMap,__bssnParams,__deviceProperties,__sm_base,stream_id); __syncthreads(); }// end of the block loop } // end of kernel /**@brief compute derivs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties */ __device__ void __compute_derivatives(const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, bool* beta0_bool, bool* beta1_bool, bool* beta2_bool, unsigned int stream_id){ const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int 
SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={12,12,12}; double * unzipVarInShared = __sm_base + 0; double * unzipVarOutShared0 = __sm_base + 1728; double * unzipVarOutShared1 = __sm_base + 3456; const unsigned int Lb = 0;// load begin bound const unsigned int Le = sz[0]-0;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*3)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(0,(int)(0 + tile_sz[2]*iter_z -2*iter_z*3)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-0-1); if((ijk_lm[5]-ijk_lm[4]+1)<=9) ijk_lm[4]=ijk_lm[4]-(9-(ijk_lm[5]-ijk_lm[4]+1)) ; for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(0,(int)(0 + tile_sz[1]*iter_y -2*iter_y*3)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-0-1); if((ijk_lm[3]-ijk_lm[2]+1)<=9) ijk_lm[2]=ijk_lm[2]-(9-(ijk_lm[3]-ijk_lm[2]+1)) ; for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(0,(int)(0 + tile_sz[0]*iter_x -2*iter_x*3)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-0-1); if((ijk_lm[1]-ijk_lm[0]+1)<=9) ijk_lm[0]=ijk_lm[0]-(9-(ijk_lm[1]-ijk_lm[0]+1)) ; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) unzipVarOutShared0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) unzipVarOutShared1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); cuda::__extractSign3D<double>((double *)unzipVarInShared,(bool *) beta0_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__extractSign3D<double>((double *)unzipVarOutShared0,(bool *) beta1_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__extractSign3D<double>((double *)unzipVarOutShared1,(bool *) beta2_bool,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data 
is loaded // computing deriv x for variable alpha _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable alpha _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable alpha _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable alpha _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta0 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
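// Below: beta0's advective z-derivative is written to __agrad_2_beta0, then U_BETA1 is loaded and the
// identical derivative sequence is applied to beta1.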
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta1 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
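// Below: beta1's advective z-derivative is stored to __agrad_2_beta1, then U_BETA2 is loaded and processed
// with the same sequence as beta0/beta1.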
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable beta2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable beta2 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable beta2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable beta2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
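// Below: beta2's advective z-derivative is stored, then U_B0 is loaded. Note that for B0/B1/B2 only first
// derivatives, Kreiss-Oliger derivatives, and advective derivatives are computed; no second derivatives are
// stored for these variables.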
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
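// Below: the advective x/y/z derivatives of B0 are written to __agrad_0_B0/__agrad_1_B0/__agrad_2_B0, then
// U_B1 is loaded and handled exactly like B0.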
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 
3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
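// Below: B1's advective z-derivative is stored to __agrad_2_B1, then U_B2 is loaded and given the same
// first/KO/advective derivative treatment.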
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable B2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable B2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable B2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable chi _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable chi _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable chi _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable chi _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_GT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable Gt2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable Gt2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable Gt2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable K _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable K _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable K _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt0 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
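// The x-, y- and z-advective derivatives of gt0 are stored below (__agrad_0_gt0 .. __agrad_2_gt0);
// U_SYMGT1 is then loaded into shared memory and the identical pipeline (first derivatives, mixed
// and pure second derivatives, Kreiss-Oliger dissipation and advective derivatives) is repeated
// for gt1, and again further down for gt2 .. gt5.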
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt1 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
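// Shared-memory usage throughout this kernel: unzipVarInShared holds the currently loaded input
// variable, unzipVarOutShared0 receives each newly computed derivative, and unzipVarOutShared1 is
// used for the mixed second derivatives (xy, xz, yz) built from the first derivative still held in
// unzipVarOutShared0; the __syncthreads() calls keep each buffer's readers ordered before the
// buffer is next overwritten.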
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
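// __agrad_0_gt1 .. __agrad_2_gt1 are stored below; the gt2 pipeline begins at the U_SYMGT2 load.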
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt2 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
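// end of the gt2 derivatives below; gt3 starts at the U_SYMGT3 load.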
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt3 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt3 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt3 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt3 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
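// end of the gt3 derivatives below; gt4 starts at the U_SYMGT4 load.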
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt4 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt4 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt4 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt4 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
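// end of the gt4 derivatives below; gt5 starts at the U_SYMGT5 load.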
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable gt5 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv xy for variable gt5 _RSWS_deriv42_y((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv xz for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_0_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable gt5 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); // computing deriv yz for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared1,(const double *) unzipVarOutShared0,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared1,&(__derivWorkspace->__grad2_1_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable gt5 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_xx((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_0_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_yy((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_1_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42_zz((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad2_2_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At0 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At0 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At0 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At1 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At1 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At1 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At2 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At2 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At2 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At3 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At3 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At3 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At4 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At4 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At4 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //load input data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) unzipVarInShared,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); //sync to make sure all the data is loaded // computing deriv x for variable At5 _RSWS_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv y for variable At5 _RSWS_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); // computing deriv z for variable At5 _RSWS_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? 
(ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__grad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_ko_deriv42_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz,(const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__kograd_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_x((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dx, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta0_bool , 3, bflag); __syncthreads(); tile_lm[0]=3; tile_lm[1]=(ijk_lm[1]-ijk_lm[0]); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? 
(ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_y((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dy, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta1_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=3; tile_lm[3]=(ijk_lm[3]-ijk_lm[2]); tile_lm[4]=(iter_z)? 3: 0; tile_lm[5]=(iter_z==(BLK_ITERATIONS_Z-1)) ? (ijk_lm[5]-ijk_lm[4]) : (ijk_lm[5]-ijk_lm[4]-3); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); _RSWS_deriv42adv_z((double *) unzipVarOutShared0,(const double *) unzipVarInShared,dz, (const unsigned int *) ijk_lm , (const unsigned int *) alignedSz , (const unsigned int *) tile_sz, (const bool*) beta2_bool , 3, bflag); __syncthreads(); tile_lm[0]=(iter_x)? 3: 0; tile_lm[1]=(iter_x==(BLK_ITERATIONS_X-1)) ? (ijk_lm[1]-ijk_lm[0]) : (ijk_lm[1]-ijk_lm[0]-3); tile_lm[2]=(iter_y)? 3: 0; tile_lm[3]=(iter_y==(BLK_ITERATIONS_Y-1)) ? (ijk_lm[3]-ijk_lm[2]) : (ijk_lm[3]-ijk_lm[2]-3); tile_lm[4]=3; tile_lm[5]=(ijk_lm[5]-ijk_lm[4]); cuda::__storeSharedToGlobal3D<double>((double *) unzipVarOutShared0,&(__derivWorkspace->__agrad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block tile loop x } // end of block tile loop y } // end of block tile loop z } // end of function __compute_derivatives /**@brief compute a_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_a_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for a_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double 
ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={8,8,8}; //input vars begin double * alpha = __sm_base + 0; double * K = __sm_base + 512; double * beta0 = __sm_base + 1024; double * beta1 = __sm_base + 1536; double * beta2 = __sm_base + 2048; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_alpha = __sm_base + 2560; double * agrad_2_alpha = __sm_base + 3072; double * agrad_0_alpha = __sm_base + 3584; // deriv vars end // output vars begin double * a_rhs = __sm_base + 4096; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
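// Note on the staging above/below (descriptive comment, not generated code): the loads
// bring in the lapse, the trace K, and the shift components, plus the advective
// derivatives agrad_*_alpha that the earlier __compute_derivatives pass placed in
// __derivWorkspace. Once staged, the per-point expression that follows corresponds to
// the advective 1+log slicing condition,
//   d_t alpha = -2 alpha K + lambda[0] * beta^i (agrad_i alpha),
// which is exactly what the generated Dendro expression for a_rhs evaluates.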
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 12 // Dendro: printing temp variables // Dendro: printing variables a_rhs[pp] = -2*K[pp]*alpha[pp] + lambda[0]*(beta0[pp]*agrad_0_alpha[pp] + beta1[pp]*agrad_1_alpha[pp] + beta2[pp]*agrad_2_alpha[pp]); // Dendro: reduced ops: 12 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(a_rhs, &__unzipOutVar[cuda::VAR::U_ALPHA][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_a_rhs /**@brief compute b_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_b_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for b_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double 
lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={6,6,6}; //input vars begin double * beta1 = __sm_base + 0; double * beta2 = __sm_base + 216; double * alpha = __sm_base + 432; double * B2 = __sm_base + 648; double * beta0 = __sm_base + 864; double * B1 = __sm_base + 1080; double * B0 = __sm_base + 1296; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_beta1 = __sm_base + 1512; double * agrad_1_beta2 = __sm_base + 1728; double * agrad_0_beta2 = __sm_base + 1944; double * agrad_2_beta0 = __sm_base + 2160; double * agrad_2_beta2 = __sm_base + 2376; double * agrad_2_beta1 = __sm_base + 2592; double * agrad_0_beta1 = __sm_base + 2808; double * agrad_1_beta0 = __sm_base + 3024; double * agrad_0_beta0 = __sm_base + 3240; // deriv vars end // output vars begin double * b_rhs0 = __sm_base + 3456; double * b_rhs2 = __sm_base + 3672; double * b_rhs1 = __sm_base + 3888; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 51 // Dendro: printing temp variables const double DENDRO_0 = (3.0L/4.0L)*alpha[pp]*lambda_f[1] + (3.0L/4.0L)*lambda_f[0]; // Dendro: printing variables b_rhs0[pp] = B0[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta0[pp] + beta1[pp]*agrad_1_beta0[pp] + beta2[pp]*agrad_2_beta0[pp]); b_rhs1[pp] = B1[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta1[pp] + beta1[pp]*agrad_1_beta1[pp] + beta2[pp]*agrad_2_beta1[pp]); b_rhs2[pp] = B2[pp]*DENDRO_0 + lambda[1]*(beta0[pp]*agrad_0_beta2[pp] + beta1[pp]*agrad_1_beta2[pp] + beta2[pp]*agrad_2_beta2[pp]); // Dendro: reduced ops: 39 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(b_rhs0, &__unzipOutVar[cuda::VAR::U_BETA0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(b_rhs2, &__unzipOutVar[cuda::VAR::U_BETA2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(b_rhs1, &__unzipOutVar[cuda::VAR::U_BETA1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block 
assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_b_rhs /**@brief compute gt_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_gt_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for gt_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={4,4,4}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 64; double * gt3 = __sm_base + 128; double * beta2 = __sm_base + 192; double * At1 = __sm_base + 256; double * alpha = __sm_base + 320; double * gt4 = __sm_base + 384; double * gt2 = __sm_base + 448; double * gt5 = __sm_base + 512; double * At3 = __sm_base + 576; double * At4 = __sm_base + 640; double * At0 = __sm_base + 704; double * At2 = __sm_base + 768; double * beta0 = __sm_base + 832; double * gt0 = __sm_base + 896; double * At5 = __sm_base + 960; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_2_gt5 = __sm_base + 1024; double * agrad_2_gt1 = __sm_base + 1088; double * grad_0_beta0 = __sm_base + 1152; double * agrad_0_gt0 = __sm_base + 1216; double * agrad_1_gt3 = __sm_base + 1280; double * grad_1_beta1 = __sm_base + 1344; double * grad_2_beta0 = __sm_base + 1408; double * agrad_1_gt0 = __sm_base + 1472; double * agrad_1_gt4 = __sm_base + 1536; double * grad_0_beta1 = __sm_base + 1600; double * agrad_2_gt2 = __sm_base + 1664; double * grad_2_beta2 = __sm_base + 1728; double * agrad_2_gt0 = __sm_base + 1792; double * agrad_1_gt2 = __sm_base + 1856; double * agrad_0_gt5 = __sm_base + 1920; double * agrad_1_gt5 = 
__sm_base + 1984; double * agrad_0_gt3 = __sm_base + 2048; double * agrad_0_gt2 = __sm_base + 2112; double * agrad_1_gt1 = __sm_base + 2176; double * agrad_0_gt1 = __sm_base + 2240; double * grad_2_beta1 = __sm_base + 2304; double * agrad_2_gt4 = __sm_base + 2368; double * grad_1_beta0 = __sm_base + 2432; double * agrad_2_gt3 = __sm_base + 2496; double * grad_0_beta2 = __sm_base + 2560; double * grad_1_beta2 = __sm_base + 2624; double * agrad_0_gt4 = __sm_base + 2688; // deriv vars end // output vars begin double * gt_rhs01 = __sm_base + 2752; double * gt_rhs22 = __sm_base + 2816; double * gt_rhs11 = __sm_base + 2880; double * gt_rhs00 = __sm_base + 2944; double * gt_rhs12 = __sm_base + 3008; double * gt_rhs02 = __sm_base + 3072; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
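// Staging note for the conformal-metric update (descriptive comment): the six gt
// components, the shift, At_ij, alpha, and the first derivatives of beta feed the
// pointwise expressions further down, which evaluate
//   d_t gt_ij = -2 alpha At_ij + beta^k (agrad_k gt_ij)
//               + gt_ik grad_j beta^k + gt_kj grad_i beta^k
//               - (2/3) gt_ij grad_k beta^k,
// i.e. the Lie-derivative transport of the conformal metric along the shift.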
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double 
*) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
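// All workspace reads in this function use the same offset pattern,
//   stream_id * __szPerStream + SM_ID * __maxBlkSz,
// so each (CUDA stream, SM) pair reads back the derivative scratch slab that the
// preceding __compute_derivatives pass wrote for the block currently resident on this
// SM; the remaining loads below simply mirror that layout for every gt/beta derivative
// the update needs.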
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 210 // Dendro: printing temp variables const double DENDRO_0 = 2*alpha[pp]; const double DENDRO_1 = grad_0_beta0[pp]; const double DENDRO_2 = (2.0L/3.0L)*gt0[pp]; const double DENDRO_3 = grad_1_beta1[pp]; const double DENDRO_4 = grad_2_beta2[pp]; const double DENDRO_5 = 2*gt1[pp]; const double DENDRO_6 = grad_0_beta1[pp]; const double DENDRO_7 = 2*gt2[pp]; const double DENDRO_8 = grad_0_beta2[pp]; const double DENDRO_9 = grad_1_beta0[pp]; const double DENDRO_10 = grad_1_beta2[pp]; const double DENDRO_11 = (1.0L/3.0L)*gt1[pp]; const double DENDRO_12 = (2.0L/3.0L)*DENDRO_4; const double DENDRO_13 = grad_2_beta0[pp]; const double DENDRO_14 = grad_2_beta1[pp]; const double DENDRO_15 = (1.0L/3.0L)*gt2[pp]; const double DENDRO_16 = (2.0L/3.0L)*DENDRO_3; const 
double DENDRO_17 = (2.0L/3.0L)*DENDRO_1; const double DENDRO_18 = 2*gt4[pp]; const double DENDRO_19 = (1.0L/3.0L)*gt4[pp]; // Dendro: printing variables gt_rhs00[pp] = -At0[pp]*DENDRO_0 + (4.0L/3.0L)*DENDRO_1*gt0[pp] - DENDRO_2*DENDRO_3 - DENDRO_2*DENDRO_4 + DENDRO_5*DENDRO_6 + DENDRO_7*DENDRO_8 + beta0[pp]*agrad_0_gt0[pp] + beta1[pp]*agrad_1_gt0[pp] + beta2[pp]*agrad_2_gt0[pp]; gt_rhs01[pp] = -At1[pp]*DENDRO_0 + DENDRO_1*DENDRO_11 + DENDRO_10*gt2[pp] + DENDRO_11*DENDRO_3 - DENDRO_12*gt1[pp] + DENDRO_6*gt3[pp] + DENDRO_8*gt4[pp] + DENDRO_9*gt0[pp] + beta0[pp]*agrad_0_gt1[pp] + beta1[pp]*agrad_1_gt1[pp] + beta2[pp]*agrad_2_gt1[pp]; gt_rhs02[pp] = -At2[pp]*DENDRO_0 + DENDRO_1*DENDRO_15 + DENDRO_13*gt0[pp] + DENDRO_14*gt1[pp] + DENDRO_15*DENDRO_4 - DENDRO_16*gt2[pp] + DENDRO_6*gt4[pp] + DENDRO_8*gt5[pp] + beta0[pp]*agrad_0_gt2[pp] + beta1[pp]*agrad_1_gt2[pp] + beta2[pp]*agrad_2_gt2[pp]; gt_rhs11[pp] = -At3[pp]*DENDRO_0 + DENDRO_10*DENDRO_18 - DENDRO_12*gt3[pp] - DENDRO_17*gt3[pp] + (4.0L/3.0L)*DENDRO_3*gt3[pp] + DENDRO_5*DENDRO_9 + beta0[pp]*agrad_0_gt3[pp] + beta1[pp]*agrad_1_gt3[pp] + beta2[pp]*agrad_2_gt3[pp]; gt_rhs12[pp] = -At4[pp]*DENDRO_0 + DENDRO_10*gt5[pp] + DENDRO_13*gt1[pp] + DENDRO_14*gt3[pp] - DENDRO_17*gt4[pp] + DENDRO_19*DENDRO_3 + DENDRO_19*DENDRO_4 + DENDRO_9*gt2[pp] + beta0[pp]*agrad_0_gt4[pp] + beta1[pp]*agrad_1_gt4[pp] + beta2[pp]*agrad_2_gt4[pp]; gt_rhs22[pp] = -At5[pp]*DENDRO_0 + DENDRO_13*DENDRO_7 + DENDRO_14*DENDRO_18 - DENDRO_16*gt5[pp] - DENDRO_17*gt5[pp] + (4.0L/3.0L)*DENDRO_4*gt5[pp] + beta0[pp]*agrad_0_gt5[pp] + beta1[pp]*agrad_1_gt5[pp] + beta2[pp]*agrad_2_gt5[pp]; // Dendro: reduced ops: 162 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(gt_rhs01, &__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs22, &__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs11, &__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs00, &__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs12, &__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(gt_rhs02, &__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_gt_rhs /**@brief compute chi_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: 
cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_chi_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for chi_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={7,7,7}; //input vars begin double * K = __sm_base + 0; double * beta1 = __sm_base + 343; double * beta2 = __sm_base + 686; double * alpha = __sm_base + 1029; double * beta0 = __sm_base + 1372; double * chi = __sm_base + 1715; //input vars end // staged vars begin // staged vars end // deriv vars begin double * agrad_1_chi = __sm_base + 2058; double * grad_0_beta0 = __sm_base + 2401; double * agrad_0_chi = __sm_base + 2744; double * agrad_2_chi = __sm_base + 3087; double * grad_1_beta1 = __sm_base + 3430; double * grad_2_beta2 = __sm_base + 3773; // deriv vars end // output vars begin double * chi_rhs = __sm_base + 4116; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
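// chi needs only first derivatives: its advective gradients plus the divergence of the
// shift (loaded just below). The per-point expression that follows reduces to
//   d_t chi = (2/3) chi ( alpha K - grad_k beta^k ) + beta^k (agrad_k chi),
// the usual evolution equation for the conformal factor in the BSSN formulation.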
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 22 // Dendro: printing temp variables const double DENDRO_0 = (2.0L/3.0L)*chi[pp]; // Dendro: printing variables chi_rhs[pp] = DENDRO_0*K[pp]*alpha[pp] - DENDRO_0*(grad_0_beta0[pp] + grad_1_beta1[pp] + grad_2_beta2[pp]) + beta0[pp]*agrad_0_chi[pp] + beta1[pp]*agrad_1_chi[pp] + beta2[pp]*agrad_2_chi[pp]; // Dendro: reduced ops: 20 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(chi_rhs, &__unzipOutVar[cuda::VAR::U_CHI][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_chi_rhs /**@brief compute At_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_At_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for At_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double 
eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * K = __sm_base + 0; double * gt1 = __sm_base + 27; double * beta1 = __sm_base + 54; double * gt3 = __sm_base + 81; double * At1 = __sm_base + 108; double * gt5 = __sm_base + 135; double * alpha = __sm_base + 162; double * gt4 = __sm_base + 189; double * gt2 = __sm_base + 216; double * beta2 = __sm_base + 243; double * At3 = __sm_base + 270; double * At4 = __sm_base + 297; double * At0 = __sm_base + 324; double * At2 = __sm_base + 351; double * beta0 = __sm_base + 378; double * gt0 = __sm_base + 405; double * chi = __sm_base + 432; double * At5 = __sm_base + 459; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad2_0_0_gt3 = __sm_base + 486; double * grad2_2_2_alpha = __sm_base + 513; double * grad2_1_2_gt1 = __sm_base + 540; double * grad_2_gt3 = __sm_base + 567; double * grad_1_beta1 = __sm_base + 594; double * grad_0_Gt1 = __sm_base + 621; double * grad_1_gt5 = __sm_base + 648; double * grad2_0_2_gt5 = __sm_base + 675; double * grad2_1_1_alpha = __sm_base + 702; double * agrad_1_At2 = __sm_base + 729; double * grad2_0_1_gt0 = __sm_base + 756; double * agrad_2_At1 = __sm_base + 783; double * grad_1_gt1 = __sm_base + 810; double * agrad_0_At3 = __sm_base + 837; double * agrad_1_At4 = __sm_base + 864; double * grad_2_beta2 = __sm_base + 891; double * grad_0_chi = __sm_base + 918; double * agrad_2_At4 = __sm_base + 945; double * grad2_0_2_gt4 = __sm_base + 972; double * grad_1_chi = __sm_base + 999; double * grad2_0_1_gt1 = __sm_base + 1026; double * grad2_1_2_alpha = __sm_base + 1053; double * grad2_1_2_gt3 = __sm_base + 1080; double * grad2_2_2_gt1 = __sm_base + 1107; double * agrad_2_At3 = __sm_base + 1134; double * grad2_0_0_gt2 = __sm_base + 1161; double * grad_1_beta0 = __sm_base + 1188; double * grad_0_gt2 = __sm_base + 1215; double * grad_0_beta2 = __sm_base + 1242; double * grad2_2_2_gt4 = __sm_base + 1269; double * agrad_2_At5 = __sm_base + 1296; double * grad_0_gt5 = __sm_base + 1323; double * grad2_0_1_alpha = __sm_base + 1350; double * agrad_2_At2 = __sm_base + 1377; double * grad_0_Gt2 = __sm_base + 1404; double * grad_0_gt4 = __sm_base + 1431; double * grad2_2_2_chi = __sm_base + 1458; double * grad2_0_2_gt3 = __sm_base + 1485; double * agrad_1_At1 = __sm_base + 1512; double * grad2_0_0_gt4 = __sm_base + 1539; double * grad_0_gt1 = __sm_base + 1566; double * grad2_0_0_gt0 = __sm_base + 1593; double * agrad_0_At4 = __sm_base + 1620; double * grad2_1_1_gt4 = __sm_base + 1647; double * grad2_0_2_chi = __sm_base + 1674; double * grad2_0_1_chi = __sm_base + 1701; double * grad2_0_2_gt1 = __sm_base + 1728; double * agrad_0_At2 = __sm_base + 1755; double * grad2_0_0_gt5 = __sm_base + 1782; double * grad_2_Gt2 = __sm_base + 1809; double * grad_1_Gt2 = __sm_base + 1836; double * agrad_0_At0 = __sm_base + 1863; double * grad_0_gt3 = __sm_base + 1890; double * grad_2_beta1 = __sm_base + 1917; double * 
grad_1_gt3 = __sm_base + 1944; double * grad2_1_1_gt3 = __sm_base + 1971; double * grad2_0_2_alpha = __sm_base + 1998; double * grad2_0_1_gt5 = __sm_base + 2025; double * agrad_2_At0 = __sm_base + 2052; double * grad2_2_2_gt0 = __sm_base + 2079; double * grad_1_gt2 = __sm_base + 2106; double * grad2_0_0_gt1 = __sm_base + 2133; double * grad2_0_1_gt3 = __sm_base + 2160; double * grad_2_Gt0 = __sm_base + 2187; double * grad_1_alpha = __sm_base + 2214; double * grad2_1_2_gt4 = __sm_base + 2241; double * grad2_1_1_gt5 = __sm_base + 2268; double * grad_2_gt4 = __sm_base + 2295; double * grad2_2_2_gt5 = __sm_base + 2322; double * grad_2_gt2 = __sm_base + 2349; double * agrad_1_At0 = __sm_base + 2376; double * grad2_2_2_gt3 = __sm_base + 2403; double * grad_2_gt1 = __sm_base + 2430; double * grad2_0_2_gt2 = __sm_base + 2457; double * grad_1_gt0 = __sm_base + 2484; double * grad_0_beta0 = __sm_base + 2511; double * grad_1_Gt0 = __sm_base + 2538; double * grad2_1_2_gt5 = __sm_base + 2565; double * grad_2_gt0 = __sm_base + 2592; double * grad_2_Gt1 = __sm_base + 2619; double * grad2_1_1_gt2 = __sm_base + 2646; double * grad2_2_2_gt2 = __sm_base + 2673; double * grad_2_alpha = __sm_base + 2700; double * agrad_1_At5 = __sm_base + 2727; double * grad_2_beta0 = __sm_base + 2754; double * grad_1_gt4 = __sm_base + 2781; double * grad2_1_1_gt0 = __sm_base + 2808; double * grad2_0_2_gt0 = __sm_base + 2835; double * grad_0_beta1 = __sm_base + 2862; double * grad_0_alpha = __sm_base + 2889; double * grad_1_Gt1 = __sm_base + 2916; double * grad2_1_2_gt0 = __sm_base + 2943; double * grad2_0_0_alpha = __sm_base + 2970; double * grad2_0_1_gt2 = __sm_base + 2997; double * grad_0_gt0 = __sm_base + 3024; double * grad2_1_2_gt2 = __sm_base + 3051; double * grad_2_gt5 = __sm_base + 3078; double * agrad_0_At1 = __sm_base + 3105; double * agrad_1_At3 = __sm_base + 3132; double * grad_2_chi = __sm_base + 3159; double * grad2_0_0_chi = __sm_base + 3186; double * agrad_0_At5 = __sm_base + 3213; double * grad2_1_1_gt1 = __sm_base + 3240; double * grad_0_Gt0 = __sm_base + 3267; double * grad2_1_1_chi = __sm_base + 3294; double * grad2_0_1_gt4 = __sm_base + 3321; double * grad2_1_2_chi = __sm_base + 3348; double * grad_1_beta2 = __sm_base + 3375; // deriv vars end // output vars begin double * At_rhs12 = __sm_base + 3402; double * At_rhs11 = __sm_base + 3429; double * At_rhs22 = __sm_base + 3456; double * At_rhs02 = __sm_base + 3483; double * At_rhs00 = __sm_base + 3510; double * At_rhs01 = __sm_base + 3537; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1;
const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X;
const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X;
unsigned int ijk_lm[3*2];
unsigned int tile_lm[3*2];
for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){
ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0));
ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1);
for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){
ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0));
ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1);
for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){
ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0));
ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1);
tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0];
tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2];
tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4];
//if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0)
//printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]);
//load data from global to shared memory
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
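// The __loadGlobalToShared3D calls above copy one tile of each evolved field
// (K, the gt/At components, alpha, beta1, beta2 so far) from the unzipped
// global arrays into the per-block shared-memory tiles declared earlier; the
// calls that follow stage the remaining fields (At0, At2, beta0, gt0, chi, At5)
// and then the precomputed derivative tiles. Each tile holds
// tile_sz[0]*tile_sz[1]*tile_sz[2] = 27 doubles, matching the stride of 27
// between consecutive __sm_base offsets.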
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad2_1_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
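// Derivative inputs are pulled from __derivWorkspace rather than __unzipInVar.
// Judging by the member names, each stream owns a __szPerStream-sized slab of
// the workspace and each SM a __maxBlkSz-sized slot within that slab, so the
// base index (stream_id*__szPerStream + SM_ID*__maxBlkSz) selects the
// derivatives computed for the block this SM is currently processing.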
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At2,(const unsigned 
int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
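// Naming convention in these loads (as the identifiers suggest): grad_i_F is
// the first derivative of F along direction i, grad2_i_j_F the mixed second
// derivative along i and j, and agrad_i_F the advective (upwinded) derivative
// that is contracted with the shift beta^i in the BSSN right-hand sides.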
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt3,(const unsigned int *) 
ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
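// The last few derivative tiles are staged below, followed by __syncthreads()
// so every load completes before any thread reads shared memory. Threads whose
// (threadIdx.x, threadIdx.y) fall outside the current tile extent are masked
// off; the rest sweep the tile in k, with the flat shared index pp advancing by
// tile_sz[0]*tile_sz[1] per z-slice. The damping coefficient eta starts at
// ETA_CONST and is scaled by (ETA_R0/r_coord)^ETA_DAMPING_EXP once
// r_coord >= ETA_R0, before the Dendro-generated temporaries and the At_rhs
// expressions are evaluated point by point.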
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( 
(ETA_R0/r_coord), ETA_DAMPING_EXP); }
// Dendro: {{{
// Dendro: original ops: 630012
// Dendro: printing temp variables
const double DENDRO_0 = grad_0_beta0[pp]; const double DENDRO_1 = (2.0L/3.0L)*At0[pp]; const double DENDRO_2 = grad_1_beta1[pp]; const double DENDRO_3 = grad_2_beta2[pp]; const double DENDRO_4 = 2*At1[pp]; const double DENDRO_5 = grad_0_beta1[pp]; const double DENDRO_6 = 2*At2[pp]; const double DENDRO_7 = grad_0_beta2[pp]; const double DENDRO_8 = pow(gt4[pp], 2); const double DENDRO_9 = DENDRO_8*gt0[pp]; const double DENDRO_10 = pow(gt1[pp], 2); const double DENDRO_11 = DENDRO_10*gt5[pp]; const double DENDRO_12 = pow(gt2[pp], 2); const double DENDRO_13 = DENDRO_12*gt3[pp]; const double DENDRO_14 = gt0[pp]*gt3[pp]; const double DENDRO_15 = DENDRO_14*gt5[pp]; const double DENDRO_16 = gt1[pp]*gt2[pp]; const double DENDRO_17 = 2*DENDRO_16*gt4[pp]; const double DENDRO_18 = DENDRO_11 + DENDRO_13 - DENDRO_15 - DENDRO_17 + DENDRO_9; const double DENDRO_19 = 1.0/DENDRO_18; const double DENDRO_20 = 2*At1[pp]*DENDRO_19; const double DENDRO_21 = gt1[pp]*gt5[pp]; const double DENDRO_22 = gt2[pp]*gt4[pp]; const double DENDRO_23 = DENDRO_21 - DENDRO_22; const double DENDRO_24 = gt0[pp]*gt4[pp]; const double DENDRO_25 = -DENDRO_16 + DENDRO_24; const double DENDRO_26 = -DENDRO_12 + gt0[pp]*gt5[pp]; const double DENDRO_27 = 2*At0[pp]*DENDRO_19; const double DENDRO_28 = At1[pp]*DENDRO_23; const double DENDRO_29 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_30 = -At2[pp]*DENDRO_29; const double DENDRO_31 = -DENDRO_8 + gt3[pp]*gt5[pp]; const double DENDRO_32 = 2*At2[pp]*DENDRO_19; const double DENDRO_33 = -DENDRO_10 + DENDRO_14; const double DENDRO_34 = (1.0L/12.0L)*chi[pp]; const double DENDRO_35 = grad2_0_0_alpha[pp]; const double DENDRO_36 = grad_1_alpha[pp]; const double DENDRO_37 = 1.0/chi[pp]; const double DENDRO_38 = grad_2_chi[pp]; const double DENDRO_39 = grad_0_chi[pp]; const double DENDRO_40 = grad_1_chi[pp]; const double DENDRO_41 = DENDRO_26*DENDRO_40; const double DENDRO_42 = DENDRO_23*DENDRO_39 + DENDRO_25*DENDRO_38 - DENDRO_41; const double DENDRO_43 = 0.5*DENDRO_37*DENDRO_42; const double DENDRO_44 = grad_0_gt0[pp]; const double DENDRO_45 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_46 = grad_0_gt2[pp]; const double DENDRO_47 = 1.0*DENDRO_46; const double DENDRO_48 = grad_2_gt0[pp]; const double DENDRO_49 = 0.5*DENDRO_48; const double DENDRO_50 = DENDRO_47 - DENDRO_49; const double DENDRO_51 = grad_0_gt1[pp]; const double DENDRO_52 = 1.0*DENDRO_51; const double DENDRO_53 = grad_1_gt0[pp]; const double DENDRO_54 = 0.5*DENDRO_53; const double DENDRO_55 = DENDRO_52 - DENDRO_54; const double DENDRO_56 = DENDRO_26*DENDRO_55; const double DENDRO_57 = DENDRO_25*DENDRO_50 + DENDRO_44*DENDRO_45 - DENDRO_56; const double DENDRO_58 = DENDRO_19*DENDRO_36*(DENDRO_43*gt0[pp] + DENDRO_57); const double DENDRO_59 = grad_2_alpha[pp]; const double DENDRO_60 = 12*DENDRO_19*DENDRO_59; const double DENDRO_61 = DENDRO_25*DENDRO_40; const double DENDRO_62 = DENDRO_29*DENDRO_39; const double DENDRO_63 = DENDRO_33*DENDRO_38; const double DENDRO_64 = DENDRO_61 - DENDRO_62 - DENDRO_63; const double DENDRO_65 = 0.5*DENDRO_37*DENDRO_64; const double DENDRO_66 = DENDRO_65*gt0[pp]; const double DENDRO_67 = DENDRO_25*DENDRO_55; const double DENDRO_68 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_69 = DENDRO_44*DENDRO_68; const double DENDRO_70 = DENDRO_33*DENDRO_50; const double DENDRO_71 = DENDRO_69 + DENDRO_70; const double DENDRO_72 = -DENDRO_67 +
DENDRO_71; const double DENDRO_73 = grad_0_alpha[pp]; const double DENDRO_74 = 0.5*DENDRO_31*DENDRO_44; const double DENDRO_75 = DENDRO_19*DENDRO_74; const double DENDRO_76 = DENDRO_29*DENDRO_50; const double DENDRO_77 = DENDRO_19*DENDRO_76; const double DENDRO_78 = DENDRO_23*DENDRO_55; const double DENDRO_79 = DENDRO_19*DENDRO_78; const double DENDRO_80 = 1.0/(-DENDRO_11 - DENDRO_13 + DENDRO_15 + DENDRO_17 - DENDRO_9); const double DENDRO_81 = -DENDRO_21 + DENDRO_22; const double DENDRO_82 = DENDRO_29*DENDRO_38; const double DENDRO_83 = DENDRO_31*DENDRO_39; const double DENDRO_84 = DENDRO_82 + DENDRO_83; const double DENDRO_85 = DENDRO_40*DENDRO_81 + DENDRO_84; const double DENDRO_86 = DENDRO_37*(-1.0*DENDRO_39 + 0.5*DENDRO_80*DENDRO_85*gt0[pp]); const double DENDRO_87 = 3*alpha[pp]; const double DENDRO_88 = grad_0_Gt0[pp]; const double DENDRO_89 = 4*gt1[pp]; const double DENDRO_90 = grad_0_Gt1[pp]; const double DENDRO_91 = 4*gt2[pp]; const double DENDRO_92 = grad_0_Gt2[pp]; const double DENDRO_93 = pow(chi[pp], -2); const double DENDRO_94 = pow(DENDRO_39, 2); const double DENDRO_95 = 4.0*DENDRO_19*DENDRO_25; const double DENDRO_96 = 4*DENDRO_19*DENDRO_29; const double DENDRO_97 = 4.0*DENDRO_19*DENDRO_23; const double DENDRO_98 = 2.0*DENDRO_19*DENDRO_33; const double DENDRO_99 = 2.0*DENDRO_19*DENDRO_26; const double DENDRO_100 = 2.0*DENDRO_19*DENDRO_31; const double DENDRO_101 = pow(DENDRO_18, -2); const double DENDRO_102 = 4*DENDRO_101*DENDRO_33; const double DENDRO_103 = grad_0_gt4[pp]; const double DENDRO_104 = 0.25*DENDRO_103; const double DENDRO_105 = -DENDRO_104; const double DENDRO_106 = grad_1_gt2[pp]; const double DENDRO_107 = 0.25*DENDRO_106; const double DENDRO_108 = grad_2_gt1[pp]; const double DENDRO_109 = 0.75*DENDRO_108; const double DENDRO_110 = grad_0_gt5[pp]; const double DENDRO_111 = DENDRO_103 - DENDRO_106 + DENDRO_108; const double DENDRO_112 = DENDRO_111*DENDRO_26; const double DENDRO_113 = DENDRO_110*DENDRO_25 - DENDRO_112 + DENDRO_23*DENDRO_48; const double DENDRO_114 = 4*DENDRO_101; const double DENDRO_115 = 2.0*DENDRO_101*DENDRO_23; const double DENDRO_116 = DENDRO_23*DENDRO_53; const double DENDRO_117 = grad_0_gt3[pp]; const double DENDRO_118 = DENDRO_117*DENDRO_26; const double DENDRO_119 = DENDRO_103 + DENDRO_106 - DENDRO_108; const double DENDRO_120 = DENDRO_119*DENDRO_25; const double DENDRO_121 = DENDRO_116 - DENDRO_118 + DENDRO_120; const double DENDRO_122 = DENDRO_117*DENDRO_57; const double DENDRO_123 = 4*DENDRO_101*DENDRO_25; const double DENDRO_124 = 0.25*DENDRO_117; const double DENDRO_125 = DENDRO_113*DENDRO_124; const double DENDRO_126 = -DENDRO_103 + DENDRO_106 + DENDRO_108; const double DENDRO_127 = DENDRO_121*DENDRO_126; const double DENDRO_128 = 2.0*DENDRO_101*DENDRO_29; const double DENDRO_129 = 4*DENDRO_101*DENDRO_29; const double DENDRO_130 = 0.25*DENDRO_53; const double DENDRO_131 = -DENDRO_130; const double DENDRO_132 = DENDRO_131 + 0.5*DENDRO_51; const double DENDRO_133 = DENDRO_113*DENDRO_132; const double DENDRO_134 = 4*DENDRO_101*DENDRO_23; const double DENDRO_135 = DENDRO_121*DENDRO_132; const double DENDRO_136 = 0.5*DENDRO_117; const double DENDRO_137 = grad_1_gt1[pp]; const double DENDRO_138 = 1.0*DENDRO_137; const double DENDRO_139 = -DENDRO_138; const double DENDRO_140 = DENDRO_136 + DENDRO_139; const double DENDRO_141 = DENDRO_100*grad2_0_0_gt0[pp] - DENDRO_102*DENDRO_113*(DENDRO_105 + DENDRO_107 + DENDRO_109) - DENDRO_114*DENDRO_31*DENDRO_57*(DENDRO_52 + DENDRO_54) + DENDRO_115*(DENDRO_121*DENDRO_53 + DENDRO_122) + 
DENDRO_123*(DENDRO_125 + 0.5*DENDRO_127) - DENDRO_128*(DENDRO_111*DENDRO_57 + DENDRO_113*DENDRO_53) - DENDRO_129*(DENDRO_126*DENDRO_57 + DENDRO_133) + DENDRO_134*(DENDRO_135 - 2*DENDRO_140*DENDRO_57) + 4*DENDRO_88*gt0[pp] + DENDRO_89*DENDRO_90 + DENDRO_91*DENDRO_92 - DENDRO_93*DENDRO_94 - DENDRO_95*grad2_1_2_gt0[pp] + DENDRO_96*grad2_0_2_gt0[pp] - DENDRO_97*grad2_0_1_gt0[pp] + DENDRO_98*grad2_2_2_gt0[pp] + DENDRO_99*grad2_1_1_gt0[pp]; const double DENDRO_142 = 3.0*DENDRO_101*DENDRO_33; const double DENDRO_143 = DENDRO_111*DENDRO_23; const double DENDRO_144 = DENDRO_110*DENDRO_29; const double DENDRO_145 = DENDRO_31*DENDRO_48; const double DENDRO_146 = DENDRO_144 + DENDRO_145; const double DENDRO_147 = -DENDRO_143 + DENDRO_146; const double DENDRO_148 = DENDRO_147*DENDRO_48; const double DENDRO_149 = 3.0*DENDRO_101*DENDRO_26; const double DENDRO_150 = DENDRO_117*DENDRO_23; const double DENDRO_151 = DENDRO_31*DENDRO_53; const double DENDRO_152 = DENDRO_119*DENDRO_29; const double DENDRO_153 = DENDRO_151 + DENDRO_152; const double DENDRO_154 = -DENDRO_150 + DENDRO_153; const double DENDRO_155 = DENDRO_154*DENDRO_53; const double DENDRO_156 = 4*DENDRO_101*DENDRO_121*DENDRO_26; const double DENDRO_157 = 6.0*DENDRO_101*DENDRO_44; const double DENDRO_158 = DENDRO_74 + DENDRO_76; const double DENDRO_159 = DENDRO_158 - DENDRO_78; const double DENDRO_160 = 0.25*DENDRO_110; const double DENDRO_161 = grad_2_gt2[pp]; const double DENDRO_162 = 1.0*DENDRO_161; const double DENDRO_163 = -DENDRO_162; const double DENDRO_164 = DENDRO_29*DENDRO_48; const double DENDRO_165 = DENDRO_110*DENDRO_33; const double DENDRO_166 = DENDRO_111*DENDRO_25; const double DENDRO_167 = -DENDRO_164 - DENDRO_165 + DENDRO_166; const double DENDRO_168 = 4*DENDRO_101*DENDRO_167*DENDRO_33; const double DENDRO_169 = DENDRO_117*DENDRO_25; const double DENDRO_170 = DENDRO_29*DENDRO_53; const double DENDRO_171 = DENDRO_119*DENDRO_33; const double DENDRO_172 = DENDRO_170 + DENDRO_171; const double DENDRO_173 = -DENDRO_169 + DENDRO_172; const double DENDRO_174 = 0.75*DENDRO_106; const double DENDRO_175 = 0.25*DENDRO_108; const double DENDRO_176 = 4*DENDRO_101*DENDRO_26*(DENDRO_105 + DENDRO_174 + DENDRO_175); const double DENDRO_177 = 4*DENDRO_101*DENDRO_31; const double DENDRO_178 = DENDRO_47 + DENDRO_49; const double DENDRO_179 = 0.25*DENDRO_48; const double DENDRO_180 = DENDRO_154*DENDRO_179; const double DENDRO_181 = DENDRO_130*DENDRO_147; const double DENDRO_182 = DENDRO_164 + DENDRO_165; const double DENDRO_183 = -DENDRO_166 + DENDRO_182; const double DENDRO_184 = DENDRO_110*DENDRO_72; const double DENDRO_185 = DENDRO_147*DENDRO_44; const double DENDRO_186 = DENDRO_159*DENDRO_48; const double DENDRO_187 = DENDRO_154*DENDRO_44; const double DENDRO_188 = DENDRO_159*DENDRO_53; const double DENDRO_189 = 0.25*DENDRO_185; const double DENDRO_190 = 0.25*DENDRO_187; const double DENDRO_191 = DENDRO_160*DENDRO_173; const double DENDRO_192 = DENDRO_126*DENDRO_183; const double DENDRO_193 = DENDRO_119*DENDRO_72; const double DENDRO_194 = DENDRO_113*DENDRO_140; const double DENDRO_195 = DENDRO_111*DENDRO_121; const double DENDRO_196 = 0.25*DENDRO_195; const double DENDRO_197 = 0.5*DENDRO_110; const double DENDRO_198 = DENDRO_163 + DENDRO_197; const double DENDRO_199 = DENDRO_169 - DENDRO_170 - DENDRO_171; const double DENDRO_200 = DENDRO_198*DENDRO_199; const double DENDRO_201 = DENDRO_119*DENDRO_167; const double DENDRO_202 = 0.25*DENDRO_201; const double DENDRO_203 = -DENDRO_202; const double DENDRO_204 = -DENDRO_179; const double DENDRO_205 
= DENDRO_204 + 0.5*DENDRO_46; const double DENDRO_206 = DENDRO_110 - 2.0*DENDRO_161; const double DENDRO_207 = 2*DENDRO_37; const double DENDRO_208 = grad2_0_0_chi[pp]; const double DENDRO_209 = -DENDRO_208; const double DENDRO_210 = DENDRO_19*DENDRO_38; const double DENDRO_211 = DENDRO_67 - DENDRO_69 - DENDRO_70; const double DENDRO_212 = DENDRO_19*DENDRO_40; const double DENDRO_213 = DENDRO_19*DENDRO_39; const double DENDRO_214 = -DENDRO_74 - DENDRO_76 + DENDRO_78; const double DENDRO_215 = DENDRO_113*DENDRO_29; const double DENDRO_216 = grad_1_gt5[pp]; const double DENDRO_217 = DENDRO_216*DENDRO_25; const double DENDRO_218 = grad_2_gt3[pp]; const double DENDRO_219 = DENDRO_218*DENDRO_26; const double DENDRO_220 = DENDRO_126*DENDRO_23; const double DENDRO_221 = DENDRO_217 - DENDRO_219 + DENDRO_220; const double DENDRO_222 = DENDRO_221*DENDRO_25; const double DENDRO_223 = DENDRO_121*DENDRO_23; const double DENDRO_224 = grad_2_gt5[pp]; const double DENDRO_225 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_226 = 0.5*DENDRO_216; const double DENDRO_227 = grad_2_gt4[pp]; const double DENDRO_228 = 1.0*DENDRO_227; const double DENDRO_229 = -DENDRO_228; const double DENDRO_230 = DENDRO_226 + DENDRO_229; const double DENDRO_231 = -DENDRO_198*DENDRO_23 + DENDRO_224*DENDRO_225 + DENDRO_230*DENDRO_26; const double DENDRO_232 = DENDRO_231*DENDRO_33; const double DENDRO_233 = grad_1_gt3[pp]; const double DENDRO_234 = 0.5*DENDRO_233*DENDRO_26; const double DENDRO_235 = grad_1_gt4[pp]; const double DENDRO_236 = 1.0*DENDRO_235; const double DENDRO_237 = 0.5*DENDRO_218; const double DENDRO_238 = DENDRO_236 - DENDRO_237; const double DENDRO_239 = DENDRO_238*DENDRO_25; const double DENDRO_240 = DENDRO_140*DENDRO_23; const double DENDRO_241 = -DENDRO_234 + DENDRO_239 - DENDRO_240; const double DENDRO_242 = DENDRO_241*DENDRO_26; const double DENDRO_243 = DENDRO_31*DENDRO_57; const double DENDRO_244 = 2.0*DENDRO_101*(DENDRO_215 - 1.0*DENDRO_222 - 1.0*DENDRO_223 + DENDRO_232 + DENDRO_242 + DENDRO_243); const double DENDRO_245 = DENDRO_167*DENDRO_29; const double DENDRO_246 = DENDRO_218*DENDRO_25; const double DENDRO_247 = DENDRO_216*DENDRO_33; const double DENDRO_248 = DENDRO_126*DENDRO_29; const double DENDRO_249 = DENDRO_246 - DENDRO_247 - DENDRO_248; const double DENDRO_250 = DENDRO_249*DENDRO_25; const double DENDRO_251 = DENDRO_199*DENDRO_23; const double DENDRO_252 = 0.5*DENDRO_224*DENDRO_33; const double DENDRO_253 = DENDRO_198*DENDRO_29; const double DENDRO_254 = DENDRO_230*DENDRO_25; const double DENDRO_255 = -DENDRO_252 + DENDRO_253 - DENDRO_254; const double DENDRO_256 = DENDRO_255*DENDRO_33; const double DENDRO_257 = DENDRO_238*DENDRO_33; const double DENDRO_258 = DENDRO_140*DENDRO_29 + DENDRO_225*DENDRO_233 - DENDRO_257; const double DENDRO_259 = DENDRO_258*DENDRO_26; const double DENDRO_260 = DENDRO_211*DENDRO_31; const double DENDRO_261 = 2.0*DENDRO_101*(DENDRO_245 - 1.0*DENDRO_250 - 1.0*DENDRO_251 + DENDRO_256 + DENDRO_259 + DENDRO_260); const double DENDRO_262 = DENDRO_143 - DENDRO_144 - DENDRO_145; const double DENDRO_263 = DENDRO_262*DENDRO_29; const double DENDRO_264 = DENDRO_218*DENDRO_23; const double DENDRO_265 = DENDRO_216*DENDRO_29; const double DENDRO_266 = DENDRO_126*DENDRO_31; const double DENDRO_267 = DENDRO_264 - DENDRO_265 - DENDRO_266; const double DENDRO_268 = DENDRO_25*DENDRO_267; const double DENDRO_269 = DENDRO_150 - DENDRO_151 - DENDRO_152; const double DENDRO_270 = DENDRO_23*DENDRO_269; const double DENDRO_271 = DENDRO_224*DENDRO_68; const double 
DENDRO_272 = DENDRO_23*DENDRO_230; const double DENDRO_273 = DENDRO_198*DENDRO_31; const double DENDRO_274 = -DENDRO_271 - DENDRO_272 + DENDRO_273; const double DENDRO_275 = DENDRO_274*DENDRO_33; const double DENDRO_276 = DENDRO_238*DENDRO_29; const double DENDRO_277 = DENDRO_140*DENDRO_31 + DENDRO_233*DENDRO_45 - DENDRO_276; const double DENDRO_278 = DENDRO_26*DENDRO_277; const double DENDRO_279 = DENDRO_214*DENDRO_31; const double DENDRO_280 = 2.0*DENDRO_101*(DENDRO_263 - 1.0*DENDRO_268 - 1.0*DENDRO_270 + DENDRO_275 + DENDRO_278 + DENDRO_279); const double DENDRO_281 = grad2_2_2_chi[pp]; const double DENDRO_282 = 3*DENDRO_37; const double DENDRO_283 = pow(DENDRO_38, 2); const double DENDRO_284 = DENDRO_33*(2*DENDRO_281 - DENDRO_282*DENDRO_283); const double DENDRO_285 = grad2_1_1_chi[pp]; const double DENDRO_286 = pow(DENDRO_40, 2); const double DENDRO_287 = DENDRO_26*(-DENDRO_282*DENDRO_286 + 2*DENDRO_285); const double DENDRO_288 = DENDRO_31*(2*DENDRO_208 - DENDRO_282*DENDRO_94); const double DENDRO_289 = grad2_1_2_chi[pp]; const double DENDRO_290 = DENDRO_38*DENDRO_40; const double DENDRO_291 = 2*DENDRO_25*(-DENDRO_282*DENDRO_290 + 2*DENDRO_289); const double DENDRO_292 = grad2_0_2_chi[pp]; const double DENDRO_293 = 3*DENDRO_37*DENDRO_39; const double DENDRO_294 = 2*DENDRO_29*(2*DENDRO_292 - DENDRO_293*DENDRO_38); const double DENDRO_295 = grad2_0_1_chi[pp]; const double DENDRO_296 = 2*DENDRO_23*(-DENDRO_293*DENDRO_40 + 2*DENDRO_295); const double DENDRO_297 = 2*DENDRO_19; const double DENDRO_298 = -1.0*DENDRO_215 + DENDRO_222 + DENDRO_223 - DENDRO_232 - DENDRO_242 - DENDRO_243; const double DENDRO_299 = DENDRO_297*DENDRO_298*DENDRO_40; const double DENDRO_300 = -1.0*DENDRO_245 + DENDRO_250 + DENDRO_251 - DENDRO_256 - DENDRO_259 - DENDRO_260; const double DENDRO_301 = DENDRO_297*DENDRO_300*DENDRO_38; const double DENDRO_302 = -1.0*DENDRO_263 + DENDRO_268 + DENDRO_270 - DENDRO_275 - DENDRO_278 - DENDRO_279; const double DENDRO_303 = DENDRO_297*DENDRO_302*DENDRO_39; const double DENDRO_304 = DENDRO_19*DENDRO_37*(DENDRO_284 + DENDRO_287 + DENDRO_288 - DENDRO_291 + DENDRO_294 - DENDRO_296 + DENDRO_299 + DENDRO_301 + DENDRO_303); const double DENDRO_305 = grad2_2_2_alpha[pp]; const double DENDRO_306 = DENDRO_19*DENDRO_36*(DENDRO_231 + DENDRO_43*gt5[pp]); const double DENDRO_307 = 4*DENDRO_19*DENDRO_73; const double DENDRO_308 = 0.5*gt5[pp]; const double DENDRO_309 = DENDRO_23*DENDRO_40; const double DENDRO_310 = DENDRO_309 - DENDRO_82 - DENDRO_83; const double DENDRO_311 = DENDRO_308*DENDRO_310*DENDRO_37; const double DENDRO_312 = DENDRO_19*DENDRO_252; const double DENDRO_313 = DENDRO_19*DENDRO_253; const double DENDRO_314 = DENDRO_19*DENDRO_254; const double DENDRO_315 = DENDRO_16 - DENDRO_24; const double DENDRO_316 = DENDRO_62 + DENDRO_63; const double DENDRO_317 = DENDRO_315*DENDRO_40 + DENDRO_316; const double DENDRO_318 = DENDRO_37*(DENDRO_308*DENDRO_317*DENDRO_80 - 1.0*DENDRO_38); const double DENDRO_319 = grad_2_Gt0[pp]; const double DENDRO_320 = 4*gt4[pp]; const double DENDRO_321 = grad_2_Gt1[pp]; const double DENDRO_322 = grad_2_Gt2[pp]; const double DENDRO_323 = 4*DENDRO_101*DENDRO_26; const double DENDRO_324 = 0.25*DENDRO_218; const double DENDRO_325 = -DENDRO_324; const double DENDRO_326 = 0.75*DENDRO_103; const double DENDRO_327 = -DENDRO_175; const double DENDRO_328 = 2.0*DENDRO_101*DENDRO_25; const double DENDRO_329 = DENDRO_218*DENDRO_231; const double DENDRO_330 = DENDRO_113*DENDRO_324; const double DENDRO_331 = DENDRO_119*DENDRO_221; const double DENDRO_332 = 
DENDRO_113*DENDRO_238; const double DENDRO_333 = DENDRO_111*DENDRO_221; const double DENDRO_334 = 0.25*DENDRO_333; const double DENDRO_335 = 0.25*DENDRO_216; const double DENDRO_336 = -0.5*DENDRO_227 + DENDRO_335; const double DENDRO_337 = -DENDRO_221*DENDRO_336; const double DENDRO_338 = DENDRO_100*grad2_0_0_gt5[pp] - DENDRO_113*DENDRO_177*(DENDRO_107 + DENDRO_326 + DENDRO_327) + DENDRO_123*(2*DENDRO_231*DENDRO_238 + DENDRO_337) - DENDRO_128*(DENDRO_111*DENDRO_231 + DENDRO_113*DENDRO_216) + DENDRO_134*(DENDRO_330 + 0.5*DENDRO_331) + DENDRO_134*(DENDRO_332 + DENDRO_334) - DENDRO_221*DENDRO_323*(DENDRO_236 + DENDRO_325) - DENDRO_283*DENDRO_93 + DENDRO_319*DENDRO_91 + DENDRO_320*DENDRO_321 + 4*DENDRO_322*gt5[pp] + DENDRO_328*(DENDRO_216*DENDRO_221 + DENDRO_329) - DENDRO_95*grad2_1_2_gt5[pp] + DENDRO_96*grad2_0_2_gt5[pp] - DENDRO_97*grad2_0_1_gt5[pp] + DENDRO_98*grad2_2_2_gt5[pp] + DENDRO_99*grad2_1_1_gt5[pp]; const double DENDRO_339 = DENDRO_216*DENDRO_249; const double DENDRO_340 = 3.0*DENDRO_101*DENDRO_31; const double DENDRO_341 = DENDRO_110*DENDRO_167; const double DENDRO_342 = 6.0*DENDRO_101*DENDRO_224; const double DENDRO_343 = 4*DENDRO_101*DENDRO_31*(DENDRO_204 + DENDRO_47); const double DENDRO_344 = 4*DENDRO_101*DENDRO_26*(DENDRO_104 + DENDRO_174 + DENDRO_327); const double DENDRO_345 = 4*DENDRO_101*DENDRO_274*DENDRO_33; const double DENDRO_346 = 4*DENDRO_101*DENDRO_231*DENDRO_33; const double DENDRO_347 = DENDRO_167*DENDRO_335; const double DENDRO_348 = DENDRO_160*DENDRO_249; const double DENDRO_349 = DENDRO_224*DENDRO_249; const double DENDRO_350 = DENDRO_216*DENDRO_255; const double DENDRO_351 = DENDRO_167*DENDRO_224; const double DENDRO_352 = DENDRO_110*DENDRO_255; const double DENDRO_353 = DENDRO_274*DENDRO_48; const double DENDRO_354 = 0.25*DENDRO_224; const double DENDRO_355 = DENDRO_249*DENDRO_354; const double DENDRO_356 = DENDRO_167*DENDRO_354; const double DENDRO_357 = DENDRO_179*DENDRO_267; const double DENDRO_358 = DENDRO_119*DENDRO_262; const double DENDRO_359 = DENDRO_126*DENDRO_274; const double DENDRO_360 = DENDRO_126*DENDRO_262; const double DENDRO_361 = 0.25*DENDRO_360; const double DENDRO_362 = DENDRO_113*DENDRO_336; const double DENDRO_363 = -DENDRO_362; const double DENDRO_364 = DENDRO_119*DENDRO_231; const double DENDRO_365 = DENDRO_160 - 0.5*DENDRO_161; const double DENDRO_366 = DENDRO_267*DENDRO_365; const double DENDRO_367 = -DENDRO_366; const double DENDRO_368 = DENDRO_119*DENDRO_274; const double DENDRO_369 = -DENDRO_262*DENDRO_365; const double DENDRO_370 = 2.0*DENDRO_46 - 1.0*DENDRO_48; const double DENDRO_371 = -DENDRO_281; const double DENDRO_372 = DENDRO_38*DENDRO_80; const double DENDRO_373 = -DENDRO_226; const double DENDRO_374 = DENDRO_228 + DENDRO_373; const double DENDRO_375 = -DENDRO_197; const double DENDRO_376 = DENDRO_162 + DENDRO_375; const double DENDRO_377 = DENDRO_40*DENDRO_80; const double DENDRO_378 = -0.5*gt0[pp]*gt4[pp] + 0.5*gt1[pp]*gt2[pp]; const double DENDRO_379 = DENDRO_39*DENDRO_80; const double DENDRO_380 = 2.0*DENDRO_101*DENDRO_298; const double DENDRO_381 = 2.0*DENDRO_101*DENDRO_300; const double DENDRO_382 = 2.0*DENDRO_101*DENDRO_302; const double DENDRO_383 = DENDRO_19*DENDRO_37*(-DENDRO_284 - DENDRO_287 - DENDRO_288 + DENDRO_291 - DENDRO_294 + DENDRO_296 - DENDRO_299 - DENDRO_301 - DENDRO_303); const double DENDRO_384 = grad2_1_1_alpha[pp]; const double DENDRO_385 = 4*DENDRO_19*DENDRO_59; const double DENDRO_386 = 0.5*DENDRO_37*gt3[pp]; const double DENDRO_387 = -1.0*DENDRO_40; const double DENDRO_388 = 0.5*gt3[pp]; const 
double DENDRO_389 = DENDRO_315*DENDRO_38 + DENDRO_39*DENDRO_81 + DENDRO_41; const double DENDRO_390 = -DENDRO_19*DENDRO_234 + DENDRO_19*DENDRO_239 - DENDRO_19*DENDRO_240; const double DENDRO_391 = grad_1_Gt0[pp]; const double DENDRO_392 = grad_1_Gt1[pp]; const double DENDRO_393 = grad_1_Gt2[pp]; const double DENDRO_394 = DENDRO_218*DENDRO_221; const double DENDRO_395 = DENDRO_117*DENDRO_121; const double DENDRO_396 = DENDRO_121*DENDRO_324; const double DENDRO_397 = DENDRO_124*DENDRO_221; const double DENDRO_398 = DENDRO_100*grad2_0_0_gt3[pp] - DENDRO_114*DENDRO_258*DENDRO_26*(DENDRO_236 + DENDRO_237) - DENDRO_129*(DENDRO_121*DENDRO_237 + DENDRO_397) - DENDRO_129*(DENDRO_136*DENDRO_221 + DENDRO_396) - DENDRO_142*DENDRO_394 - DENDRO_286*DENDRO_93 + DENDRO_320*DENDRO_393 - DENDRO_340*DENDRO_395 + DENDRO_391*DENDRO_89 + 4*DENDRO_392*gt3[pp] - DENDRO_95*grad2_1_2_gt3[pp] + DENDRO_96*grad2_0_2_gt3[pp] - DENDRO_97*grad2_0_1_gt3[pp] + DENDRO_98*grad2_2_2_gt3[pp] + DENDRO_99*grad2_1_1_gt3[pp]; const double DENDRO_399 = 6.0*DENDRO_233; const double DENDRO_400 = 4*DENDRO_101*DENDRO_249*DENDRO_33; const double DENDRO_401 = 4*DENDRO_101*DENDRO_31*(DENDRO_131 + DENDRO_52); const double DENDRO_402 = -DENDRO_107; const double DENDRO_403 = 4*DENDRO_101*DENDRO_33*(DENDRO_104 + DENDRO_109 + DENDRO_402); const double DENDRO_404 = 4*DENDRO_101*DENDRO_31*(DENDRO_175 + DENDRO_326 + DENDRO_402); const double DENDRO_405 = 4*DENDRO_101*DENDRO_26*DENDRO_277; const double DENDRO_406 = DENDRO_221*DENDRO_233; const double DENDRO_407 = DENDRO_218*DENDRO_241; const double DENDRO_408 = DENDRO_121*DENDRO_233; const double DENDRO_409 = DENDRO_117*DENDRO_241; const double DENDRO_410 = DENDRO_216*DENDRO_258; const double DENDRO_411 = DENDRO_277*DENDRO_53; const double DENDRO_412 = 0.25*DENDRO_406; const double DENDRO_413 = 0.25*DENDRO_408; const double DENDRO_414 = DENDRO_199*DENDRO_335; const double DENDRO_415 = 0.5*DENDRO_103; const double DENDRO_416 = 0.5*DENDRO_108; const double DENDRO_417 = -0.5*DENDRO_106 + DENDRO_415 + DENDRO_416; const double DENDRO_418 = DENDRO_130*DENDRO_267; const double DENDRO_419 = DENDRO_126*DENDRO_277; const double DENDRO_420 = DENDRO_119*DENDRO_258; const double DENDRO_421 = DENDRO_126*DENDRO_269; const double DENDRO_422 = 0.25*DENDRO_421; const double DENDRO_423 = DENDRO_199*DENDRO_230; const double DENDRO_424 = DENDRO_119*DENDRO_249; const double DENDRO_425 = 0.25*DENDRO_424; const double DENDRO_426 = DENDRO_124 - 0.5*DENDRO_137; const double DENDRO_427 = DENDRO_267*DENDRO_426; const double DENDRO_428 = -DENDRO_427; const double DENDRO_429 = DENDRO_111*DENDRO_277; const double DENDRO_430 = 0.5*DENDRO_235 + DENDRO_325; const double DENDRO_431 = DENDRO_249*DENDRO_430; const double DENDRO_432 = 2*DENDRO_230*DENDRO_258; const double DENDRO_433 = -DENDRO_269*DENDRO_426; const double DENDRO_434 = 2*DENDRO_277*DENDRO_55; const double DENDRO_435 = DENDRO_199*DENDRO_430; const double DENDRO_436 = DENDRO_111*DENDRO_258; const double DENDRO_437 = -DENDRO_285; const double DENDRO_438 = -DENDRO_136; const double DENDRO_439 = DENDRO_138 + DENDRO_438; const double DENDRO_440 = -0.5*gt1[pp]*gt5[pp] + 0.5*gt2[pp]*gt4[pp]; const double DENDRO_441 = DENDRO_262*DENDRO_48; const double DENDRO_442 = DENDRO_269*DENDRO_53; const double DENDRO_443 = DENDRO_179*DENDRO_269; const double DENDRO_444 = DENDRO_130*DENDRO_262; const double DENDRO_445 = DENDRO_110*DENDRO_211; const double DENDRO_446 = DENDRO_262*DENDRO_44; const double DENDRO_447 = DENDRO_214*DENDRO_48; const double DENDRO_448 = DENDRO_269*DENDRO_44; const 
double DENDRO_449 = DENDRO_214*DENDRO_53; const double DENDRO_450 = 0.25*DENDRO_446; const double DENDRO_451 = 0.25*DENDRO_448; const double DENDRO_452 = DENDRO_160*DENDRO_199; const double DENDRO_453 = DENDRO_126*DENDRO_167; const double DENDRO_454 = DENDRO_119*DENDRO_211; const double DENDRO_455 = DENDRO_167*DENDRO_205; const double DENDRO_456 = DENDRO_199*DENDRO_205; const double DENDRO_457 = grad2_0_2_alpha[pp]; const double DENDRO_458 = DENDRO_19*DENDRO_36*(DENDRO_113 + DENDRO_37*DENDRO_42*gt2[pp]); const double DENDRO_459 = 2.0*DENDRO_59; const double DENDRO_460 = DENDRO_164*DENDRO_19; const double DENDRO_461 = DENDRO_165*DENDRO_19; const double DENDRO_462 = DENDRO_166*DENDRO_19; const double DENDRO_463 = -DENDRO_39; const double DENDRO_464 = DENDRO_80*gt2[pp]; const double DENDRO_465 = DENDRO_37*(DENDRO_317*DENDRO_464 + DENDRO_463); const double DENDRO_466 = 2.0*DENDRO_73; const double DENDRO_467 = DENDRO_144*DENDRO_19; const double DENDRO_468 = DENDRO_145*DENDRO_19; const double DENDRO_469 = DENDRO_143*DENDRO_19; const double DENDRO_470 = -DENDRO_38; const double DENDRO_471 = DENDRO_37*(DENDRO_464*DENDRO_85 + DENDRO_470); const double DENDRO_472 = -4*DENDRO_457 + 2.0*DENDRO_458 + DENDRO_459*(-DENDRO_460 - DENDRO_461 + DENDRO_462 + DENDRO_465) + DENDRO_466*(-DENDRO_467 - DENDRO_468 + DENDRO_469 + DENDRO_471); const double DENDRO_473 = -DENDRO_292; const double DENDRO_474 = 0.5*DENDRO_38*DENDRO_80; const double DENDRO_475 = 0.5*DENDRO_40*DENDRO_80; const double DENDRO_476 = 0.5*DENDRO_39*DENDRO_80; const double DENDRO_477 = DENDRO_106*DENDRO_380 + DENDRO_161*DENDRO_381 - DENDRO_207*(DENDRO_473 + DENDRO_474*(DENDRO_111*DENDRO_315 + DENDRO_182) + DENDRO_475*(DENDRO_110*DENDRO_315 + DENDRO_112 + DENDRO_48*DENDRO_81) + DENDRO_476*(DENDRO_111*DENDRO_81 + DENDRO_146)) + DENDRO_382*DENDRO_46 + DENDRO_383*gt2[pp]; const double DENDRO_478 = 2.0*gt0[pp]; const double DENDRO_479 = DENDRO_319*DENDRO_478; const double DENDRO_480 = 2.0*gt1[pp]; const double DENDRO_481 = DENDRO_321*DENDRO_480; const double DENDRO_482 = 2.0*gt2[pp]; const double DENDRO_483 = DENDRO_482*DENDRO_88; const double DENDRO_484 = DENDRO_322*DENDRO_482; const double DENDRO_485 = 2.0*gt4[pp]; const double DENDRO_486 = DENDRO_485*DENDRO_90; const double DENDRO_487 = 2.0*gt5[pp]; const double DENDRO_488 = DENDRO_487*DENDRO_92; const double DENDRO_489 = DENDRO_39*DENDRO_93; const double DENDRO_490 = -DENDRO_38*DENDRO_489; const double DENDRO_491 = -DENDRO_95*grad2_1_2_gt2[pp]; const double DENDRO_492 = DENDRO_96*grad2_0_2_gt2[pp]; const double DENDRO_493 = -DENDRO_97*grad2_0_1_gt2[pp]; const double DENDRO_494 = DENDRO_98*grad2_2_2_gt2[pp]; const double DENDRO_495 = DENDRO_99*grad2_1_1_gt2[pp]; const double DENDRO_496 = DENDRO_100*grad2_0_0_gt2[pp]; const double DENDRO_497 = DENDRO_160*DENDRO_262; const double DENDRO_498 = DENDRO_119*DENDRO_269; const double DENDRO_499 = 0.25*DENDRO_498; const double DENDRO_500 = DENDRO_214*DENDRO_50; const double DENDRO_501 = DENDRO_101*DENDRO_23; const double DENDRO_502 = DENDRO_119*DENDRO_121; const double DENDRO_503 = DENDRO_113*DENDRO_117; const double DENDRO_504 = DENDRO_221*DENDRO_53 + DENDRO_503; const double DENDRO_505 = DENDRO_110*DENDRO_199; const double DENDRO_506 = DENDRO_249*DENDRO_48 + DENDRO_505; const double DENDRO_507 = DENDRO_274*DENDRO_54; const double DENDRO_508 = DENDRO_160*DENDRO_269 + DENDRO_507; const double DENDRO_509 = 0.25*DENDRO_44; const double DENDRO_510 = DENDRO_267*DENDRO_509; const double DENDRO_511 = DENDRO_205*DENDRO_269; const double DENDRO_512 = DENDRO_444 + 
DENDRO_511; const double DENDRO_513 = DENDRO_126*DENDRO_221; const double DENDRO_514 = 0.25*DENDRO_513; const double DENDRO_515 = DENDRO_136*DENDRO_231; const double DENDRO_516 = DENDRO_121*DENDRO_335 + DENDRO_515; const double DENDRO_517 = -DENDRO_221*DENDRO_426; const double DENDRO_518 = DENDRO_121*DENDRO_430; const double DENDRO_519 = DENDRO_397 + DENDRO_518; const double DENDRO_520 = 0.5*DENDRO_44; const double DENDRO_521 = DENDRO_274*DENDRO_520; const double DENDRO_522 = DENDRO_179*DENDRO_262 + DENDRO_521; const double DENDRO_523 = DENDRO_205*DENDRO_262; const double DENDRO_524 = 0.5*DENDRO_106; const double DENDRO_525 = -0.5*DENDRO_103 + DENDRO_416 + DENDRO_524; const double DENDRO_526 = DENDRO_255*DENDRO_525 + DENDRO_347; const double DENDRO_527 = 0.25*DENDRO_358 + DENDRO_507; const double DENDRO_528 = 1.0*DENDRO_101*DENDRO_26; const double DENDRO_529 = DENDRO_199*DENDRO_216; const double DENDRO_530 = DENDRO_126*DENDRO_249; const double DENDRO_531 = -0.5*DENDRO_108 + DENDRO_415 + DENDRO_524; const double DENDRO_532 = DENDRO_214*DENDRO_531 + DENDRO_444; const double DENDRO_533 = DENDRO_167*DENDRO_179; const double DENDRO_534 = DENDRO_198*DENDRO_255; const double DENDRO_535 = -DENDRO_534; const double DENDRO_536 = 0.25*DENDRO_110*DENDRO_25 - 0.25*DENDRO_111*DENDRO_26 + 0.25*DENDRO_23*DENDRO_48; const double DENDRO_537 = DENDRO_111*DENDRO_536; const double DENDRO_538 = DENDRO_119*DENDRO_536 + DENDRO_231*DENDRO_54; const double DENDRO_539 = DENDRO_113*DENDRO_130 + DENDRO_531*DENDRO_57; const double DENDRO_540 = DENDRO_249*DENDRO_365; const double DENDRO_541 = -DENDRO_540; const double DENDRO_542 = DENDRO_199*DENDRO_354; const double DENDRO_543 = DENDRO_255*DENDRO_531 + DENDRO_542; const double DENDRO_544 = DENDRO_211*DENDRO_226; const double DENDRO_545 = DENDRO_205*DENDRO_249; const double DENDRO_546 = 0.25*DENDRO_453; const double DENDRO_547 = DENDRO_132*DENDRO_221; const double DENDRO_548 = -0.5*DENDRO_194 + DENDRO_238*DENDRO_57; const double DENDRO_549 = DENDRO_126*DENDRO_536 + DENDRO_226*DENDRO_57; const double DENDRO_550 = 0.5*DENDRO_224; const double DENDRO_551 = DENDRO_211*DENDRO_550; const double DENDRO_552 = -DENDRO_167*DENDRO_365; const double DENDRO_553 = 0.5*DENDRO_332; const double DENDRO_554 = DENDRO_140*DENDRO_231; const double DENDRO_555 = DENDRO_553 - DENDRO_554; const double DENDRO_556 = DENDRO_113*DENDRO_335 + DENDRO_231*DENDRO_525; const double DENDRO_557 = 0.25*DENDRO_530; const double DENDRO_558 = 0.5*DENDRO_351; const double DENDRO_559 = DENDRO_101*DENDRO_25; const double DENDRO_560 = DENDRO_113*DENDRO_218; const double DENDRO_561 = DENDRO_121*DENDRO_216 + DENDRO_560; const double DENDRO_562 = DENDRO_559*(DENDRO_513 + DENDRO_561); const double DENDRO_563 = DENDRO_267*DENDRO_48; const double DENDRO_564 = DENDRO_110*DENDRO_269 + DENDRO_563; const double DENDRO_565 = DENDRO_179*DENDRO_249; const double DENDRO_566 = DENDRO_452 + DENDRO_544; const double DENDRO_567 = DENDRO_347 + DENDRO_541; const double DENDRO_568 = DENDRO_396 + DENDRO_517; const double DENDRO_569 = -DENDRO_323*(DENDRO_518 + DENDRO_568); const double DENDRO_570 = DENDRO_160*DENDRO_167 + DENDRO_551; const double DENDRO_571 = DENDRO_130*DENDRO_221; const double DENDRO_572 = DENDRO_237*DENDRO_57; const double DENDRO_573 = 0.25*DENDRO_502; const double DENDRO_574 = DENDRO_134*(DENDRO_571 + DENDRO_572 + DENDRO_573); const double DENDRO_575 = DENDRO_267*DENDRO_53; const double DENDRO_576 = -DENDRO_102*(DENDRO_363 + DENDRO_556); const double DENDRO_577 = -DENDRO_129*(DENDRO_537 + DENDRO_549); const double 
DENDRO_578 = -DENDRO_269*DENDRO_365; const double DENDRO_579 = DENDRO_214*DENDRO_525 + DENDRO_510; const double DENDRO_580 = DENDRO_121*DENDRO_336; const double DENDRO_581 = -DENDRO_580; const double DENDRO_582 = DENDRO_134*(DENDRO_196 + DENDRO_548); const double DENDRO_583 = -DENDRO_129*(-DENDRO_230*DENDRO_57 + DENDRO_538); const double DENDRO_584 = -DENDRO_177*(DENDRO_417*DENDRO_57 + DENDRO_539); const double DENDRO_585 = grad2_1_2_alpha[pp]; const double DENDRO_586 = DENDRO_19*DENDRO_73; const double DENDRO_587 = 2.0*DENDRO_36; const double DENDRO_588 = DENDRO_80*gt4[pp]; const double DENDRO_589 = DENDRO_19*DENDRO_217 - DENDRO_19*DENDRO_219 + DENDRO_19*DENDRO_220; const double DENDRO_590 = DENDRO_19*DENDRO_246; const double DENDRO_591 = DENDRO_19*DENDRO_247; const double DENDRO_592 = DENDRO_19*DENDRO_248; const double DENDRO_593 = -DENDRO_40; const double DENDRO_594 = DENDRO_37*(DENDRO_317*DENDRO_588 + DENDRO_593); const double DENDRO_595 = DENDRO_459*(DENDRO_590 - DENDRO_591 - DENDRO_592 + DENDRO_594) - 4*DENDRO_585 + DENDRO_586*(-2.0*DENDRO_126*DENDRO_31 - 2.0*DENDRO_216*DENDRO_29 + 2.0*DENDRO_218*DENDRO_23 + 2.0*DENDRO_310*DENDRO_37*gt4[pp]) + DENDRO_587*(DENDRO_37*(DENDRO_389*DENDRO_588 + DENDRO_470) + DENDRO_589); const double DENDRO_596 = -DENDRO_289; const double DENDRO_597 = DENDRO_247 + DENDRO_248; const double DENDRO_598 = DENDRO_265 + DENDRO_266; const double DENDRO_599 = DENDRO_103*DENDRO_382 - DENDRO_207*(DENDRO_474*(DENDRO_218*DENDRO_315 + DENDRO_597) + DENDRO_475*(DENDRO_126*DENDRO_81 + DENDRO_216*DENDRO_315 + DENDRO_219) + DENDRO_476*(DENDRO_218*DENDRO_81 + DENDRO_598) + DENDRO_596) + DENDRO_227*DENDRO_381 + DENDRO_235*DENDRO_380 + DENDRO_383*gt4[pp]; const double DENDRO_600 = DENDRO_319*DENDRO_480; const double DENDRO_601 = DENDRO_391*DENDRO_482; const double DENDRO_602 = 2.0*gt3[pp]; const double DENDRO_603 = DENDRO_321*DENDRO_602; const double DENDRO_604 = DENDRO_392*DENDRO_485; const double DENDRO_605 = DENDRO_322*DENDRO_485; const double DENDRO_606 = DENDRO_393*DENDRO_487; const double DENDRO_607 = -DENDRO_290*DENDRO_93; const double DENDRO_608 = -DENDRO_95*grad2_1_2_gt4[pp]; const double DENDRO_609 = DENDRO_96*grad2_0_2_gt4[pp]; const double DENDRO_610 = -DENDRO_97*grad2_0_1_gt4[pp]; const double DENDRO_611 = DENDRO_98*grad2_2_2_gt4[pp]; const double DENDRO_612 = DENDRO_99*grad2_1_1_gt4[pp]; const double DENDRO_613 = DENDRO_100*grad2_0_0_gt4[pp]; const double DENDRO_614 = DENDRO_221*DENDRO_335; const double DENDRO_615 = DENDRO_238*DENDRO_241; const double DENDRO_616 = DENDRO_233*DENDRO_536; const double DENDRO_617 = DENDRO_167*DENDRO_218 + DENDRO_529; const double DENDRO_618 = DENDRO_117*DENDRO_262 + DENDRO_575; const double DENDRO_619 = DENDRO_221*DENDRO_430; const double DENDRO_620 = 0.5*DENDRO_233; const double DENDRO_621 = DENDRO_231*DENDRO_620; const double DENDRO_622 = DENDRO_221*DENDRO_324 + DENDRO_621; const double DENDRO_623 = 0.25*DENDRO_331 + DENDRO_515; const double DENDRO_624 = DENDRO_241*DENDRO_531; const double DENDRO_625 = DENDRO_249*DENDRO_324; const double DENDRO_626 = DENDRO_255*DENDRO_417 + DENDRO_348; const double DENDRO_627 = DENDRO_104 + DENDRO_175 + DENDRO_402; const double DENDRO_628 = DENDRO_262*DENDRO_627; const double DENDRO_629 = 1.0*DENDRO_101*DENDRO_31; const double DENDRO_630 = DENDRO_111*DENDRO_167; const double DENDRO_631 = DENDRO_132*DENDRO_262; const double DENDRO_632 = DENDRO_230*DENDRO_255; const double DENDRO_633 = -DENDRO_632; const double DENDRO_634 = DENDRO_167*DENDRO_336; const double DENDRO_635 = -DENDRO_634; const double 
DENDRO_636 = DENDRO_197*DENDRO_258; const double DENDRO_637 = DENDRO_167*DENDRO_430; const double DENDRO_638 = DENDRO_249*DENDRO_627; const double DENDRO_639 = -0.25*DENDRO_126*DENDRO_31 - 0.25*DENDRO_216*DENDRO_29 + 0.25*DENDRO_218*DENDRO_23; const double DENDRO_640 = DENDRO_126*DENDRO_639; const double DENDRO_641 = DENDRO_119*DENDRO_639 + DENDRO_136*DENDRO_274; const double DENDRO_642 = DENDRO_277*DENDRO_531; const double DENDRO_643 = DENDRO_124*DENDRO_267 + DENDRO_642; const double DENDRO_644 = DENDRO_258*DENDRO_550; const double DENDRO_645 = -DENDRO_249*DENDRO_336; const double DENDRO_646 = -DENDRO_262*DENDRO_426; const double DENDRO_647 = DENDRO_277*DENDRO_50; const double DENDRO_648 = DENDRO_132*DENDRO_267 + DENDRO_647; const double DENDRO_649 = DENDRO_197*DENDRO_277; const double DENDRO_650 = DENDRO_267*DENDRO_627 + DENDRO_649; const double DENDRO_651 = DENDRO_205*DENDRO_267 + DENDRO_274*DENDRO_55; const double DENDRO_652 = DENDRO_160*DENDRO_267 + DENDRO_274*DENDRO_417; const double DENDRO_653 = 1.0*DENDRO_410; const double DENDRO_654 = 0.25*DENDRO_630; const double DENDRO_655 = 0.5*DENDRO_349; const double DENDRO_656 = 1.0*DENDRO_101*DENDRO_29; const double DENDRO_657 = -DENDRO_656*(DENDRO_333 + DENDRO_561); const double DENDRO_658 = DENDRO_111*DENDRO_262; const double DENDRO_659 = DENDRO_167*DENDRO_324; const double DENDRO_660 = DENDRO_414 + DENDRO_636; const double DENDRO_661 = DENDRO_348 + DENDRO_635; const double DENDRO_662 = -DENDRO_629*(DENDRO_195 + DENDRO_502 + DENDRO_503); const double DENDRO_663 = -DENDRO_102*(DENDRO_231*DENDRO_237 + DENDRO_337 + DENDRO_614); const double DENDRO_664 = DENDRO_249*DENDRO_335 + DENDRO_644; const double DENDRO_665 = DENDRO_396 + DENDRO_397; const double DENDRO_666 = DENDRO_124*DENDRO_262; const double DENDRO_667 = DENDRO_277*DENDRO_49; const double DENDRO_668 = DENDRO_443 + DENDRO_631; const double DENDRO_669 = -DENDRO_129*(DENDRO_581 + DENDRO_623); const double DENDRO_670 = DENDRO_241*DENDRO_417 + DENDRO_616; const double DENDRO_671 = DENDRO_619 + DENDRO_621; const double DENDRO_672 = DENDRO_198*DENDRO_277; const double DENDRO_673 = 0.5*DENDRO_419; const double DENDRO_674 = grad2_0_1_alpha[pp]; const double DENDRO_675 = DENDRO_19*DENDRO_59; const double DENDRO_676 = DENDRO_80*gt1[pp]; const double DENDRO_677 = DENDRO_116*DENDRO_19 - DENDRO_118*DENDRO_19 + DENDRO_120*DENDRO_19; const double DENDRO_678 = DENDRO_150*DENDRO_19; const double DENDRO_679 = DENDRO_151*DENDRO_19; const double DENDRO_680 = DENDRO_152*DENDRO_19; const double DENDRO_681 = DENDRO_37*(DENDRO_593 + DENDRO_676*DENDRO_85); const double DENDRO_682 = DENDRO_466*(DENDRO_678 - DENDRO_679 - DENDRO_680 + DENDRO_681) + DENDRO_587*(DENDRO_37*(DENDRO_389*DENDRO_676 + DENDRO_463) + DENDRO_677) - 4*DENDRO_674 + DENDRO_675*(2.0*DENDRO_117*DENDRO_25 - 2.0*DENDRO_119*DENDRO_33 - 2.0*DENDRO_29*DENDRO_53 + 2.0*DENDRO_37*DENDRO_64*gt1[pp]); const double DENDRO_683 = -DENDRO_295; const double DENDRO_684 = DENDRO_108*DENDRO_381 + DENDRO_137*DENDRO_380 - DENDRO_207*(DENDRO_474*(DENDRO_117*DENDRO_315 + DENDRO_172) + DENDRO_475*(DENDRO_118 + DENDRO_119*DENDRO_315 + DENDRO_53*DENDRO_81) + DENDRO_476*(DENDRO_117*DENDRO_81 + DENDRO_153) + DENDRO_683) + DENDRO_382*DENDRO_51 + DENDRO_383*gt1[pp]; const double DENDRO_685 = DENDRO_391*DENDRO_478; const double DENDRO_686 = DENDRO_480*DENDRO_88; const double DENDRO_687 = DENDRO_392*DENDRO_480; const double DENDRO_688 = DENDRO_393*DENDRO_482; const double DENDRO_689 = DENDRO_602*DENDRO_90; const double DENDRO_690 = DENDRO_485*DENDRO_92; const double 
DENDRO_691 = -DENDRO_40*DENDRO_489; const double DENDRO_692 = -DENDRO_95*grad2_1_2_gt1[pp]; const double DENDRO_693 = DENDRO_96*grad2_0_2_gt1[pp]; const double DENDRO_694 = -DENDRO_97*grad2_0_1_gt1[pp]; const double DENDRO_695 = DENDRO_98*grad2_2_2_gt1[pp]; const double DENDRO_696 = DENDRO_99*grad2_1_1_gt1[pp]; const double DENDRO_697 = DENDRO_100*grad2_0_0_gt1[pp]; const double DENDRO_698 = DENDRO_121*DENDRO_130; const double DENDRO_699 = -DENDRO_177*(1.0*DENDRO_122 + DENDRO_698); const double DENDRO_700 = -DENDRO_102*(DENDRO_113*DENDRO_237 + DENDRO_514); const double DENDRO_701 = 0.5*DENDRO_408; const double DENDRO_702 = DENDRO_140*DENDRO_241; const double DENDRO_703 = -DENDRO_702; const double DENDRO_704 = DENDRO_123*(DENDRO_568 + DENDRO_616); const double DENDRO_705 = DENDRO_125 + DENDRO_572; const double DENDRO_706 = -DENDRO_129*(DENDRO_571 + DENDRO_705); const double DENDRO_707 = -DENDRO_121*DENDRO_426; const double DENDRO_708 = DENDRO_57*DENDRO_620; const double DENDRO_709 = DENDRO_121*DENDRO_124 + DENDRO_708; const double DENDRO_710 = DENDRO_134*(DENDRO_707 + DENDRO_709); const double DENDRO_711 = DENDRO_241*DENDRO_525; const double DENDRO_712 = DENDRO_396 + DENDRO_616; const double DENDRO_713 = 0.25*DENDRO_127; const double DENDRO_714 = -DENDRO_129*(DENDRO_705 + DENDRO_713); const double DENDRO_715 = 1.0*DENDRO_101*DENDRO_33; const double DENDRO_716 = DENDRO_124*DENDRO_269; const double DENDRO_717 = DENDRO_277*DENDRO_54; const double DENDRO_718 = DENDRO_214*DENDRO_417 + DENDRO_443; const double DENDRO_719 = DENDRO_214*DENDRO_55; const double DENDRO_720 = DENDRO_269*DENDRO_627; const double DENDRO_721 = DENDRO_258*DENDRO_525; const double DENDRO_722 = DENDRO_199*DENDRO_324 + DENDRO_721; const double DENDRO_723 = 0.25*DENDRO_117*DENDRO_25 - 0.25*DENDRO_119*DENDRO_33 - 0.25*DENDRO_29*DENDRO_53; const double DENDRO_724 = DENDRO_119*DENDRO_723; const double DENDRO_725 = DENDRO_126*DENDRO_723 + DENDRO_211*DENDRO_237; const double DENDRO_726 = DENDRO_277*DENDRO_520; const double DENDRO_727 = DENDRO_132*DENDRO_269; const double DENDRO_728 = DENDRO_258*DENDRO_49; const double DENDRO_729 = DENDRO_199*DENDRO_627 + DENDRO_728; const double DENDRO_730 = 0.5*DENDRO_423; const double DENDRO_731 = DENDRO_198*DENDRO_258; const double DENDRO_732 = -DENDRO_730 - DENDRO_731; const double DENDRO_733 = 0.5*DENDRO_200; const double DENDRO_734 = DENDRO_211*DENDRO_230; const double DENDRO_735 = -DENDRO_733 - DENDRO_734; const double DENDRO_736 = DENDRO_179*DENDRO_199 + DENDRO_211*DENDRO_417; const double DENDRO_737 = DENDRO_418 + DENDRO_667; const double DENDRO_738 = DENDRO_130*DENDRO_269 + DENDRO_726; const double DENDRO_739 = DENDRO_19*(-DENDRO_23*(DENDRO_682 + alpha[pp]*(-DENDRO_102*(DENDRO_541 + DENDRO_661) - DENDRO_102*(DENDRO_267*DENDRO_49 + DENDRO_628) + DENDRO_115*(DENDRO_241*DENDRO_53 + DENDRO_395) + DENDRO_123*(DENDRO_425 + DENDRO_732) + DENDRO_123*(DENDRO_517 + DENDRO_670) + DENDRO_123*(DENDRO_665 + DENDRO_711) + DENDRO_123*(DENDRO_666 + DENDRO_737) + DENDRO_123*(DENDRO_720 + DENDRO_737) + DENDRO_123*(DENDRO_557 + DENDRO_636 + DENDRO_659) - DENDRO_129*(DENDRO_510 + DENDRO_668) - DENDRO_129*(DENDRO_510 + DENDRO_718) - DENDRO_129*(DENDRO_545 + DENDRO_735) - DENDRO_129*(DENDRO_547 + DENDRO_572 + DENDRO_713) + DENDRO_134*(DENDRO_724 + DENDRO_729) + DENDRO_134*(DENDRO_727 + DENDRO_738) + DENDRO_134*(DENDRO_136*DENDRO_214 + DENDRO_738) + DENDRO_134*(DENDRO_258*DENDRO_50 + DENDRO_725) + DENDRO_134*(DENDRO_241*DENDRO_55 + DENDRO_707 + DENDRO_708) - DENDRO_177*(0.5*DENDRO_448 + DENDRO_719) - 
DENDRO_177*(DENDRO_456 + DENDRO_736) - DENDRO_177*(DENDRO_135 + DENDRO_136*DENDRO_57 + DENDRO_698) - DENDRO_323*(1.0*DENDRO_411 + DENDRO_716) - DENDRO_323*(0.5*DENDRO_420 + DENDRO_722) - DENDRO_323*(DENDRO_136*DENDRO_241 + DENDRO_413 + DENDRO_703) - DENDRO_656*(DENDRO_195 + DENDRO_504) - DENDRO_656*(DENDRO_506 + DENDRO_630) + DENDRO_684 + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 - DENDRO_715*(DENDRO_333 + DENDRO_513 + DENDRO_560))) - DENDRO_23*(DENDRO_682 + alpha[pp]*(-DENDRO_102*(DENDRO_567 + DENDRO_635) + DENDRO_115*(DENDRO_117*DENDRO_214 + DENDRO_442) + DENDRO_123*(DENDRO_637 + DENDRO_732) + DENDRO_123*(DENDRO_711 + DENDRO_712) + DENDRO_123*(DENDRO_646 + DENDRO_667 + DENDRO_720) - DENDRO_129*(DENDRO_202 + DENDRO_735) - DENDRO_129*(DENDRO_444 + DENDRO_718) - DENDRO_129*(DENDRO_579 + DENDRO_631) - DENDRO_129*(DENDRO_544 + DENDRO_565 + DENDRO_654) + DENDRO_134*(DENDRO_724 + DENDRO_725) + DENDRO_134*(DENDRO_211*DENDRO_238 + DENDRO_729) + DENDRO_134*(DENDRO_241*DENDRO_54 + DENDRO_709) + DENDRO_134*(-DENDRO_140*DENDRO_214 + DENDRO_726 + DENDRO_727) - DENDRO_177*(0.5*DENDRO_454 + DENDRO_736) - DENDRO_177*(DENDRO_214*DENDRO_54 + DENDRO_451 + DENDRO_719) - DENDRO_323*(DENDRO_435 + DENDRO_722) - DENDRO_323*(DENDRO_701 + DENDRO_703) - DENDRO_323*(DENDRO_433 + DENDRO_716 + DENDRO_717) + DENDRO_559*(DENDRO_421 + DENDRO_618) + DENDRO_559*(DENDRO_530 + DENDRO_617) + DENDRO_684 + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 + DENDRO_699 + DENDRO_700 + DENDRO_704 + DENDRO_706 + DENDRO_710 + DENDRO_714 - DENDRO_715*(DENDRO_360 + DENDRO_563 + DENDRO_658))) - DENDRO_25*(DENDRO_595 + alpha[pp]*(-DENDRO_102*(1.0*DENDRO_329 + DENDRO_614) - DENDRO_102*(0.5*DENDRO_359 + DENDRO_652) - DENDRO_102*(DENDRO_226*DENDRO_255 + DENDRO_355 + DENDRO_633) + DENDRO_123*(DENDRO_619 + DENDRO_622) + DENDRO_123*(DENDRO_640 + DENDRO_641) + DENDRO_123*(-DENDRO_140*DENDRO_274 + DENDRO_650) + DENDRO_123*(DENDRO_226*DENDRO_241 + DENDRO_622) + DENDRO_123*(DENDRO_238*DENDRO_255 + DENDRO_644 + DENDRO_645) - DENDRO_129*(DENDRO_330 + DENDRO_516) - DENDRO_129*(DENDRO_330 + DENDRO_623) - DENDRO_129*(DENDRO_347 + DENDRO_626) - DENDRO_129*(DENDRO_361 + DENDRO_651) - DENDRO_129*(DENDRO_508 + DENDRO_628) - DENDRO_129*(DENDRO_543 + DENDRO_635) + DENDRO_134*(DENDRO_519 + DENDRO_616) + DENDRO_134*(DENDRO_646 + DENDRO_648) + DENDRO_134*(DENDRO_397 + DENDRO_616 + DENDRO_624) + DENDRO_134*(DENDRO_636 + DENDRO_637 + DENDRO_638) - DENDRO_177*(DENDRO_512 + DENDRO_631) - DENDRO_177*(DENDRO_113*DENDRO_136 + DENDRO_573) - DENDRO_323*(0.5*DENDRO_406 + DENDRO_615) - DENDRO_323*(DENDRO_428 + DENDRO_643) - DENDRO_323*(DENDRO_226*DENDRO_258 + DENDRO_431 + DENDRO_625) + DENDRO_328*(DENDRO_218*DENDRO_255 + DENDRO_339) + DENDRO_501*(DENDRO_424 + DENDRO_617) + DENDRO_501*(DENDRO_498 + DENDRO_618) + DENDRO_599 + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 - DENDRO_629*(DENDRO_201 + DENDRO_505 + DENDRO_630))) - DENDRO_25*(DENDRO_595 + alpha[pp]*(-DENDRO_102*(DENDRO_367 + DENDRO_652) - DENDRO_102*(DENDRO_633 + DENDRO_655) + DENDRO_123*(DENDRO_640 + DENDRO_650) + DENDRO_123*(DENDRO_641 - DENDRO_672) + DENDRO_123*(DENDRO_645 + DENDRO_664) + DENDRO_123*(-DENDRO_230*DENDRO_241 + DENDRO_671) + 
DENDRO_123*(DENDRO_237*DENDRO_255 + DENDRO_664) - DENDRO_129*(DENDRO_542 + DENDRO_626) - DENDRO_129*(DENDRO_542 + DENDRO_661) - DENDRO_129*(DENDRO_578 + DENDRO_651) + DENDRO_134*(DENDRO_422 + DENDRO_648) + DENDRO_134*(DENDRO_518 + DENDRO_670) + DENDRO_134*(DENDRO_624 + DENDRO_665) + DENDRO_134*(DENDRO_638 + DENDRO_660) + DENDRO_134*(DENDRO_659 + DENDRO_660) + DENDRO_134*(DENDRO_499 + DENDRO_666 + DENDRO_667) - DENDRO_177*(DENDRO_511 + DENDRO_668) - DENDRO_177*(DENDRO_197*DENDRO_199 + DENDRO_654) - DENDRO_323*(DENDRO_625 + DENDRO_653) - DENDRO_323*(DENDRO_643 + DENDRO_673) - DENDRO_323*(DENDRO_237*DENDRO_241 + DENDRO_412 + DENDRO_615) + DENDRO_328*(DENDRO_216*DENDRO_241 + DENDRO_394) + DENDRO_599 + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 - DENDRO_656*(DENDRO_564 + DENDRO_658) + DENDRO_657 + DENDRO_662 + DENDRO_663 + DENDRO_669)) + DENDRO_26*(DENDRO_307*(DENDRO_277 + DENDRO_310*DENDRO_386) + 4*DENDRO_36*(DENDRO_37*(DENDRO_387 + DENDRO_388*DENDRO_389*DENDRO_80) + DENDRO_390) - 4*DENDRO_384 + DENDRO_385*(DENDRO_258 + DENDRO_65*gt3[pp]) + alpha[pp]*(-DENDRO_101*DENDRO_241*DENDRO_26*DENDRO_399 + DENDRO_115*(DENDRO_408 + DENDRO_409) + DENDRO_115*(DENDRO_117*DENDRO_269 + DENDRO_411) + DENDRO_115*(DENDRO_199*DENDRO_218 + DENDRO_420) + DENDRO_117*DENDRO_382 + DENDRO_123*(1.0*DENDRO_407 + DENDRO_412) + DENDRO_123*(DENDRO_428 + DENDRO_429) + DENDRO_123*(DENDRO_431 - DENDRO_432) - DENDRO_129*(-1.0*DENDRO_423 + DENDRO_425) - DENDRO_129*(DENDRO_249*DENDRO_417 + DENDRO_414) - DENDRO_129*(DENDRO_267*DENDRO_55 + DENDRO_422) - DENDRO_129*(DENDRO_269*DENDRO_417 + DENDRO_418) + DENDRO_134*(1.0*DENDRO_409 + DENDRO_413) + DENDRO_134*(DENDRO_433 + DENDRO_434) + DENDRO_134*(DENDRO_435 + DENDRO_436) - DENDRO_199*DENDRO_404 - DENDRO_207*(DENDRO_372*(DENDRO_233*DENDRO_378 + DENDRO_257 + DENDRO_29*DENDRO_439) + DENDRO_377*(DENDRO_234 + DENDRO_238*DENDRO_315 + DENDRO_439*DENDRO_81) + DENDRO_379*(DENDRO_233*DENDRO_440 + DENDRO_276 + DENDRO_31*DENDRO_439) + DENDRO_437) + DENDRO_218*DENDRO_381 + DENDRO_233*DENDRO_380 - DENDRO_267*DENDRO_403 - DENDRO_269*DENDRO_401 + DENDRO_328*(DENDRO_406 + DENDRO_407) + DENDRO_328*(DENDRO_117*DENDRO_267 + DENDRO_419) + DENDRO_328*(DENDRO_218*DENDRO_249 + DENDRO_410) + DENDRO_383*gt3[pp] + DENDRO_398 - DENDRO_400*(DENDRO_228 - DENDRO_335) - DENDRO_405*(DENDRO_136 + DENDRO_138))) + DENDRO_29*(DENDRO_472 + alpha[pp]*(-DENDRO_102*(1.0*DENDRO_353 + DENDRO_497) - DENDRO_102*(DENDRO_231*DENDRO_417 + DENDRO_556) - DENDRO_102*(DENDRO_197*DENDRO_255 + DENDRO_356 + DENDRO_535) + DENDRO_123*(DENDRO_334 + DENDRO_555) + DENDRO_123*(DENDRO_348 + DENDRO_526) + DENDRO_123*(DENDRO_357 + DENDRO_508) + DENDRO_123*(DENDRO_357 + DENDRO_527) + DENDRO_123*(DENDRO_514 + DENDRO_516) + DENDRO_123*(DENDRO_541 + DENDRO_543) - DENDRO_128*(DENDRO_255*DENDRO_48 + DENDRO_341) - DENDRO_129*(DENDRO_522 + DENDRO_523) - DENDRO_129*(DENDRO_537 + DENDRO_538) - DENDRO_129*(DENDRO_197*DENDRO_214 + DENDRO_522) - DENDRO_129*(DENDRO_231*DENDRO_55 + DENDRO_549) - DENDRO_129*(DENDRO_255*DENDRO_50 + DENDRO_551 + DENDRO_552) + DENDRO_134*(DENDRO_510 + DENDRO_512) + DENDRO_134*(DENDRO_510 + DENDRO_532) + DENDRO_134*(DENDRO_547 + DENDRO_548) + DENDRO_134*(DENDRO_544 + DENDRO_545 + DENDRO_546) - DENDRO_177*(DENDRO_133 + DENDRO_539) - DENDRO_177*(0.5*DENDRO_446 + DENDRO_500) - DENDRO_177*(DENDRO_197*DENDRO_211 + DENDRO_455 + DENDRO_533) - DENDRO_323*(DENDRO_517 + DENDRO_519) - DENDRO_323*(DENDRO_267*DENDRO_54 + 
DENDRO_499) + DENDRO_477 + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 + DENDRO_501*(DENDRO_201 + DENDRO_506) + DENDRO_501*(DENDRO_502 + DENDRO_504) - DENDRO_528*(DENDRO_424 + DENDRO_529 + DENDRO_530))) + DENDRO_29*(DENDRO_472 + alpha[pp]*(-DENDRO_102*(DENDRO_535 + DENDRO_558) - DENDRO_102*(DENDRO_274*DENDRO_49 + DENDRO_369 + DENDRO_497) + DENDRO_123*(DENDRO_526 + DENDRO_542) + DENDRO_123*(DENDRO_527 + DENDRO_578) + DENDRO_123*(DENDRO_542 + DENDRO_567) + DENDRO_123*(DENDRO_555 + DENDRO_581) - DENDRO_128*(DENDRO_110*DENDRO_214 + DENDRO_441) - DENDRO_129*(DENDRO_552 + DENDRO_570) - DENDRO_129*(DENDRO_255*DENDRO_49 + DENDRO_570) - DENDRO_129*(-DENDRO_198*DENDRO_214 + DENDRO_521 + DENDRO_523) + DENDRO_134*(DENDRO_443 + DENDRO_532) + DENDRO_134*(DENDRO_511 + DENDRO_579) + DENDRO_134*(DENDRO_546 + DENDRO_566) + DENDRO_134*(DENDRO_565 + DENDRO_566) - DENDRO_177*(1.0*DENDRO_445 + DENDRO_533) - DENDRO_177*(DENDRO_214*DENDRO_49 + DENDRO_450 + DENDRO_500) - DENDRO_323*(DENDRO_199*DENDRO_226 + DENDRO_557) + DENDRO_477 + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 - DENDRO_528*(DENDRO_421 + DENDRO_498 + DENDRO_575) + DENDRO_559*(DENDRO_360 + DENDRO_564) + DENDRO_562 + DENDRO_569 + DENDRO_574 + DENDRO_576 + DENDRO_577 + DENDRO_582 + DENDRO_583 + DENDRO_584)) + DENDRO_31*(-4*DENDRO_35 + DENDRO_385*(DENDRO_211 + DENDRO_66) + 4*DENDRO_58 + 4*DENDRO_73*(-DENDRO_75 - DENDRO_77 + DENDRO_79 + DENDRO_86) + alpha[pp]*(-DENDRO_114*DENDRO_178*DENDRO_211*DENDRO_31 + DENDRO_115*(DENDRO_448 + DENDRO_449) + DENDRO_115*(DENDRO_199*DENDRO_48 + DENDRO_454) + DENDRO_123*(-1.0*DENDRO_194 + DENDRO_196) + DENDRO_123*(-1.0*DENDRO_200 + DENDRO_202) + DENDRO_123*(DENDRO_452 + 0.5*DENDRO_453) + DENDRO_123*(DENDRO_262*DENDRO_54 + DENDRO_443) + DENDRO_123*(DENDRO_269*DENDRO_49 + DENDRO_444) - DENDRO_128*(DENDRO_446 + DENDRO_447) - DENDRO_128*(DENDRO_167*DENDRO_48 + DENDRO_445) - DENDRO_129*(1.0*DENDRO_447 + DENDRO_450) - DENDRO_129*(-DENDRO_206*DENDRO_211 + DENDRO_455) + DENDRO_134*(1.0*DENDRO_449 + DENDRO_451) + DENDRO_134*(DENDRO_126*DENDRO_211 + DENDRO_456) + DENDRO_141 - DENDRO_142*DENDRO_441 - DENDRO_149*DENDRO_442 - DENDRO_156*(-DENDRO_124 + DENDRO_138) - DENDRO_157*DENDRO_279 - DENDRO_168*(-DENDRO_160 + DENDRO_162) - DENDRO_176*DENDRO_199 - DENDRO_207*(DENDRO_209 + DENDRO_372*(DENDRO_315*DENDRO_55 + DENDRO_71) + DENDRO_377*(DENDRO_315*DENDRO_50 + DENDRO_44*DENDRO_440 + DENDRO_56) + DENDRO_379*(DENDRO_158 + DENDRO_55*DENDRO_81)) + DENDRO_380*DENDRO_53 + DENDRO_381*DENDRO_48 + DENDRO_382*DENDRO_44 + DENDRO_383*gt0[pp])) + DENDRO_33*(-4*DENDRO_305 + 4*DENDRO_306 + DENDRO_307*(DENDRO_274 + DENDRO_311) + 4*DENDRO_59*(-DENDRO_312 + DENDRO_313 - DENDRO_314 + DENDRO_318) + alpha[pp]*(DENDRO_110*DENDRO_382 + DENDRO_123*(1.0*DENDRO_350 + DENDRO_355) + DENDRO_123*(DENDRO_367 + DENDRO_368) - DENDRO_128*(DENDRO_351 + DENDRO_352) - DENDRO_128*(DENDRO_110*DENDRO_262 + DENDRO_353) - DENDRO_129*(1.0*DENDRO_352 + DENDRO_356) - DENDRO_129*(DENDRO_363 + DENDRO_364) - DENDRO_129*(DENDRO_274*DENDRO_370 + DENDRO_369) + DENDRO_134*(DENDRO_357 + 0.5*DENDRO_358) + DENDRO_134*(DENDRO_167*DENDRO_226 + DENDRO_348) + DENDRO_134*(DENDRO_197*DENDRO_249 + DENDRO_347) + DENDRO_134*(DENDRO_267*DENDRO_50 + DENDRO_361) - DENDRO_149*DENDRO_339 - DENDRO_207*(DENDRO_371 + DENDRO_372*(DENDRO_252 + DENDRO_29*DENDRO_376 + 
DENDRO_315*DENDRO_374) + DENDRO_377*(DENDRO_224*DENDRO_378 + DENDRO_26*DENDRO_374 + DENDRO_376*DENDRO_81) + DENDRO_379*(DENDRO_271 + DENDRO_31*DENDRO_376 + DENDRO_374*DENDRO_81)) + DENDRO_216*DENDRO_380 + DENDRO_224*DENDRO_381 - DENDRO_256*DENDRO_342 - DENDRO_262*DENDRO_343 - DENDRO_267*DENDRO_344 + DENDRO_328*(DENDRO_349 + DENDRO_350) + DENDRO_328*(DENDRO_110*DENDRO_267 + DENDRO_359) + DENDRO_338 - DENDRO_340*DENDRO_341 - DENDRO_345*(DENDRO_162 + DENDRO_197) - DENDRO_346*(DENDRO_226 + DENDRO_228) + DENDRO_383*gt5[pp]))); const double DENDRO_740 = grad_1_beta0[pp]; const double DENDRO_741 = grad_1_beta2[pp]; const double DENDRO_742 = (1.0L/3.0L)*At1[pp]; const double DENDRO_743 = (2.0L/3.0L)*DENDRO_3; const double DENDRO_744 = At4[pp]*DENDRO_25; const double DENDRO_745 = -At3[pp]*DENDRO_26 + DENDRO_28 + DENDRO_744; const double DENDRO_746 = -At1[pp]*DENDRO_31 + At3[pp]*DENDRO_23 - At4[pp]*DENDRO_29; const double DENDRO_747 = -At1[pp]*DENDRO_29 + At3[pp]*DENDRO_25 - At4[pp]*DENDRO_33; const double DENDRO_748 = 6.0*DENDRO_36; const double DENDRO_749 = DENDRO_19*DENDRO_42; const double DENDRO_750 = 6.0*DENDRO_73; const double DENDRO_751 = 1.0*DENDRO_101*DENDRO_25; const double DENDRO_752 = -DENDRO_246 + DENDRO_597; const double DENDRO_753 = DENDRO_126*DENDRO_752; const double DENDRO_754 = -DENDRO_264 + DENDRO_598; const double DENDRO_755 = DENDRO_126*DENDRO_154; const double DENDRO_756 = DENDRO_53*DENDRO_754 + DENDRO_755; const double DENDRO_757 = DENDRO_48*DENDRO_754; const double DENDRO_758 = DENDRO_126*DENDRO_147; const double DENDRO_759 = DENDRO_111*DENDRO_147; const double DENDRO_760 = DENDRO_234 - DENDRO_239 + DENDRO_240; const double DENDRO_761 = DENDRO_154*DENDRO_426; const double DENDRO_762 = DENDRO_179*DENDRO_752; const double DENDRO_763 = DENDRO_226*DENDRO_72; const double DENDRO_764 = DENDRO_183*DENDRO_627; const double DENDRO_765 = DENDRO_180 + DENDRO_181; const double DENDRO_766 = DENDRO_132*DENDRO_147; const double DENDRO_767 = DENDRO_159*DENDRO_525 + DENDRO_509*DENDRO_754; const double DENDRO_768 = -DENDRO_173*DENDRO_430; const double DENDRO_769 = -0.25*DENDRO_117*DENDRO_25 + 0.25*DENDRO_119*DENDRO_33 + 0.25*DENDRO_29*DENDRO_53; const double DENDRO_770 = 0.5*DENDRO_19*DENDRO_40; const double DENDRO_771 = 0.5*DENDRO_19*DENDRO_38; const double DENDRO_772 = 0.5*DENDRO_19*DENDRO_39; const double DENDRO_773 = grad_2_beta0[pp]; const double DENDRO_774 = grad_2_beta1[pp]; const double DENDRO_775 = (1.0L/3.0L)*At2[pp]; const double DENDRO_776 = (2.0L/3.0L)*DENDRO_2; const double DENDRO_777 = At2[pp]*DENDRO_23 - At4[pp]*DENDRO_26 + At5[pp]*DENDRO_25; const double DENDRO_778 = -At2[pp]*DENDRO_31 + At4[pp]*DENDRO_23 - At5[pp]*DENDRO_29; const double DENDRO_779 = -At5[pp]*DENDRO_33 + DENDRO_30 + DENDRO_744; const double DENDRO_780 = 6.0*DENDRO_59; const double DENDRO_781 = DENDRO_110*DENDRO_154 + DENDRO_757; const double DENDRO_782 = DENDRO_173*DENDRO_354; const double DENDRO_783 = -DENDRO_782; const double DENDRO_784 = DENDRO_183*DENDRO_335; const double DENDRO_785 = DENDRO_191 + DENDRO_763; const double DENDRO_786 = DENDRO_119*DENDRO_154; const double DENDRO_787 = DENDRO_160*DENDRO_183; const double DENDRO_788 = DENDRO_550*DENDRO_72; const double DENDRO_789 = DENDRO_252 - DENDRO_253 + DENDRO_254; const double DENDRO_790 = DENDRO_147*DENDRO_365; const double DENDRO_791 = DENDRO_271 + DENDRO_272 - DENDRO_273; const double DENDRO_792 = DENDRO_154*DENDRO_365; const double DENDRO_793 = DENDRO_119*DENDRO_147; const double DENDRO_794 = DENDRO_154*DENDRO_205; const double DENDRO_795 = 
(2.0L/3.0L)*DENDRO_0;
const double DENDRO_796 = 2*At4[pp];
const double DENDRO_797 = 2*At3[pp]*DENDRO_19;
const double DENDRO_798 = 2*At4[pp]*DENDRO_19;
const double DENDRO_799 = 12*DENDRO_19*DENDRO_73;
const double DENDRO_800 = DENDRO_218*DENDRO_760;
const double DENDRO_801 = DENDRO_117*DENDRO_760;
const double DENDRO_802 = DENDRO_173*DENDRO_335;
const double DENDRO_803 = 1.0*DENDRO_126*DENDRO_31 + 1.0*DENDRO_216*DENDRO_29 - 1.0*DENDRO_218*DENDRO_23;
const double DENDRO_804 = 0.25*DENDRO_755;
const double DENDRO_805 = (1.0L/3.0L)*At4[pp];
const double DENDRO_806 = DENDRO_160*DENDRO_752;
const double DENDRO_807 = DENDRO_636 - DENDRO_802;
const double DENDRO_808 = -DENDRO_335*DENDRO_752 + DENDRO_644;
const double DENDRO_809 = 0.25*DENDRO_126*DENDRO_31 + 0.25*DENDRO_216*DENDRO_29 - 0.25*DENDRO_218*DENDRO_23;
const double DENDRO_810 = DENDRO_224*DENDRO_752;
const double DENDRO_811 = DENDRO_216*DENDRO_789;
const double DENDRO_812 = DENDRO_183*DENDRO_224;
const double DENDRO_813 = DENDRO_110*DENDRO_789;
// Dendro: printing variables
At_rhs00[pp] = (4.0L/3.0L)*At0[pp]*DENDRO_0 - DENDRO_1*DENDRO_2 - DENDRO_1*DENDRO_3 + DENDRO_34*(-12*DENDRO_35 + 12*DENDRO_58 - DENDRO_60*(-DENDRO_66 + DENDRO_72) - 12*DENDRO_73*(DENDRO_75 + DENDRO_77 - DENDRO_79 - DENDRO_86) + DENDRO_739*gt0[pp] + DENDRO_87*(-DENDRO_115*(DENDRO_187 + DENDRO_188) - DENDRO_115*(DENDRO_173*DENDRO_48 + DENDRO_193) - DENDRO_123*(DENDRO_191 + 0.5*DENDRO_192) - DENDRO_123*(DENDRO_194 - DENDRO_196) - DENDRO_123*(DENDRO_200 + DENDRO_203) - DENDRO_123*(DENDRO_147*DENDRO_54 + DENDRO_180) - DENDRO_123*(DENDRO_154*DENDRO_49 + DENDRO_181) + DENDRO_128*(DENDRO_185 + DENDRO_186) + DENDRO_128*(DENDRO_183*DENDRO_48 + DENDRO_184) + DENDRO_129*(1.0*DENDRO_186 + DENDRO_189) - DENDRO_129*(-DENDRO_183*DENDRO_205 + DENDRO_206*DENDRO_72) - DENDRO_134*(1.0*DENDRO_188 + DENDRO_190) - DENDRO_134*(1.0*DENDRO_126*DENDRO_72 + DENDRO_173*DENDRO_205) + DENDRO_141 + DENDRO_142*DENDRO_148 + DENDRO_149*DENDRO_155 + DENDRO_156*(DENDRO_124 + DENDRO_139) + DENDRO_157*DENDRO_159*DENDRO_31 + DENDRO_168*(DENDRO_160 + DENDRO_163) + DENDRO_173*DENDRO_176 + DENDRO_177*DENDRO_178*DENDRO_72 - DENDRO_207*(DENDRO_209 + DENDRO_210*DENDRO_211 + DENDRO_212*DENDRO_57 + DENDRO_213*DENDRO_214) - DENDRO_244*DENDRO_53 - DENDRO_261*DENDRO_48 - DENDRO_280*DENDRO_44 - DENDRO_304*gt0[pp])) + DENDRO_4*DENDRO_5 + DENDRO_6*DENDRO_7 - alpha[pp]*(-At0[pp]*K[pp] + DENDRO_20*(At0[pp]*DENDRO_23 - At1[pp]*DENDRO_26 + At2[pp]*DENDRO_25) + DENDRO_27*(-At0[pp]*DENDRO_31 + DENDRO_28 + DENDRO_30) + DENDRO_32*(-At0[pp]*DENDRO_29 + At1[pp]*DENDRO_25 - At2[pp]*DENDRO_33)) + beta0[pp]*agrad_0_At0[pp] + beta1[pp]*agrad_1_At0[pp] + beta2[pp]*agrad_2_At0[pp];
At_rhs01[pp] = At0[pp]*DENDRO_740 - At1[pp]*DENDRO_743 + At2[pp]*DENDRO_741 + At3[pp]*DENDRO_5 + At4[pp]*DENDRO_7 + DENDRO_0*DENDRO_742 + DENDRO_2*DENDRO_742 + DENDRO_34*(-12*DENDRO_674 - DENDRO_675*(-6.0*DENDRO_117*DENDRO_25 + 6.0*DENDRO_119*DENDRO_33 + 6.0*DENDRO_29*DENDRO_53 - 6.0*DENDRO_37*DENDRO_64*gt1[pp]) + DENDRO_739*gt1[pp] + DENDRO_748*(DENDRO_37*(DENDRO_463 + DENDRO_749*gt1[pp]) + DENDRO_677) - DENDRO_750*(-DENDRO_678 + DENDRO_679 + DENDRO_680 - DENDRO_681) + DENDRO_87*(DENDRO_101*DENDRO_33*(DENDRO_757 + DENDRO_758 + DENDRO_759) + DENDRO_102*(-DENDRO_347 + DENDRO_540 + DENDRO_634) - DENDRO_108*DENDRO_261 - DENDRO_115*(DENDRO_117*DENDRO_159 + DENDRO_155) + DENDRO_123*(-DENDRO_525*DENDRO_760 + DENDRO_712) - DENDRO_123*(-DENDRO_637 + DENDRO_730 + DENDRO_731) + DENDRO_123*(DENDRO_147*DENDRO_426 - DENDRO_154*DENDRO_627 + DENDRO_667) +
DENDRO_129*(DENDRO_766 + DENDRO_767) + DENDRO_129*(DENDRO_159*DENDRO_417 + DENDRO_765) + DENDRO_129*(DENDRO_203 + DENDRO_733 + DENDRO_734) + DENDRO_129*(DENDRO_762 + DENDRO_763 + DENDRO_764) + DENDRO_134*(-DENDRO_54*DENDRO_760 + DENDRO_709) + DENDRO_134*(-DENDRO_111*DENDRO_769 - DENDRO_238*DENDRO_72 + DENDRO_728) - DENDRO_134*(DENDRO_119*DENDRO_769 + DENDRO_126*DENDRO_769 + DENDRO_237*DENDRO_72) + DENDRO_134*(-DENDRO_132*DENDRO_154 + DENDRO_140*DENDRO_159 + DENDRO_726) - DENDRO_137*DENDRO_244 + DENDRO_177*(DENDRO_159*DENDRO_54 + DENDRO_159*DENDRO_55 + DENDRO_190) + DENDRO_177*(DENDRO_173*DENDRO_179 + 0.5*DENDRO_193 + DENDRO_417*DENDRO_72) - DENDRO_207*(DENDRO_121*DENDRO_770 + DENDRO_199*DENDRO_771 + DENDRO_269*DENDRO_772 + DENDRO_683) - DENDRO_280*DENDRO_51 - DENDRO_304*gt1[pp] + DENDRO_323*(-DENDRO_701 + DENDRO_702) - DENDRO_323*(-DENDRO_124*DENDRO_154 + DENDRO_717 + DENDRO_761) - DENDRO_323*(-DENDRO_173*DENDRO_324 + DENDRO_721 + DENDRO_768) + DENDRO_685 + DENDRO_686 + DENDRO_687 + DENDRO_688 + DENDRO_689 + DENDRO_690 + DENDRO_691 + DENDRO_692 + DENDRO_693 + DENDRO_694 + DENDRO_695 + DENDRO_696 + DENDRO_697 + DENDRO_699 + DENDRO_700 + DENDRO_704 + DENDRO_706 + DENDRO_710 + DENDRO_714 - DENDRO_751*(DENDRO_117*DENDRO_147 + DENDRO_756) - DENDRO_751*(DENDRO_173*DENDRO_216 + DENDRO_183*DENDRO_218 + DENDRO_753))) - alpha[pp]*(-At1[pp]*K[pp] + DENDRO_20*DENDRO_745 + DENDRO_27*DENDRO_746 + DENDRO_32*DENDRO_747) + beta0[pp]*agrad_0_At1[pp] + beta1[pp]*agrad_1_At1[pp] + beta2[pp]*agrad_2_At1[pp]; At_rhs02[pp] = At0[pp]*DENDRO_773 + At1[pp]*DENDRO_774 - At2[pp]*DENDRO_776 + At4[pp]*DENDRO_5 + At5[pp]*DENDRO_7 + DENDRO_0*DENDRO_775 + DENDRO_3*DENDRO_775 + DENDRO_34*(-12*DENDRO_457 + 6.0*DENDRO_458 + DENDRO_739*gt2[pp] - DENDRO_750*(DENDRO_467 + DENDRO_468 - DENDRO_469 - DENDRO_471) - DENDRO_780*(DENDRO_460 + DENDRO_461 - DENDRO_462 - DENDRO_465) + DENDRO_87*(DENDRO_101*DENDRO_26*(DENDRO_756 + DENDRO_786) + DENDRO_102*(DENDRO_534 - DENDRO_558) - DENDRO_102*(-DENDRO_147*DENDRO_160 - DENDRO_49*DENDRO_791 + DENDRO_790) - DENDRO_106*DENDRO_244 - DENDRO_123*(-DENDRO_553 + DENDRO_554 + DENDRO_580) + DENDRO_123*(DENDRO_365*DENDRO_752 + DENDRO_783 - DENDRO_784) - DENDRO_123*(DENDRO_525*DENDRO_789 + DENDRO_782 + DENDRO_784) + DENDRO_123*(-DENDRO_54*DENDRO_791 + DENDRO_792 - 0.25*DENDRO_793) + DENDRO_128*(DENDRO_110*DENDRO_159 + DENDRO_148) - DENDRO_129*(-DENDRO_147*DENDRO_205 + DENDRO_159*DENDRO_198 - DENDRO_520*DENDRO_791) - DENDRO_129*(DENDRO_183*DENDRO_365 - DENDRO_787 - DENDRO_788) + DENDRO_129*(DENDRO_49*DENDRO_789 + DENDRO_787 + DENDRO_788) - DENDRO_134*(0.25*DENDRO_192 + DENDRO_785) - DENDRO_134*(DENDRO_762 + DENDRO_785) - DENDRO_134*(DENDRO_767 + DENDRO_794) - DENDRO_134*(DENDRO_159*DENDRO_531 + DENDRO_765) - DENDRO_161*DENDRO_261 + DENDRO_177*(DENDRO_179*DENDRO_183 + 1.0*DENDRO_184) + DENDRO_177*(DENDRO_159*DENDRO_49 + DENDRO_159*DENDRO_50 + DENDRO_189) - DENDRO_207*(DENDRO_113*DENDRO_770 + DENDRO_167*DENDRO_771 + DENDRO_262*DENDRO_772 + DENDRO_473) - DENDRO_280*DENDRO_46 - DENDRO_304*gt2[pp] + DENDRO_323*(DENDRO_173*DENDRO_226 + 0.25*DENDRO_753) + DENDRO_479 + DENDRO_481 + DENDRO_483 + DENDRO_484 + DENDRO_486 + DENDRO_488 + DENDRO_490 + DENDRO_491 + DENDRO_492 + DENDRO_493 + DENDRO_494 + DENDRO_495 + DENDRO_496 + DENDRO_562 + DENDRO_569 + DENDRO_574 + DENDRO_576 + DENDRO_577 + DENDRO_582 + DENDRO_583 + DENDRO_584 - DENDRO_751*(DENDRO_758 + DENDRO_781))) - alpha[pp]*(-At2[pp]*K[pp] + DENDRO_20*DENDRO_777 + DENDRO_27*DENDRO_778 + DENDRO_32*DENDRO_779) + beta0[pp]*agrad_0_At2[pp] + 
beta1[pp]*agrad_1_At2[pp] + beta2[pp]*agrad_2_At2[pp]; At_rhs11[pp] = (4.0L/3.0L)*At3[pp]*DENDRO_2 - At3[pp]*DENDRO_743 - At3[pp]*DENDRO_795 + DENDRO_34*(12*DENDRO_36*(DENDRO_37*(DENDRO_387 + DENDRO_388*DENDRO_749) + DENDRO_390) - 12*DENDRO_384 + DENDRO_60*(DENDRO_258 - DENDRO_386*(DENDRO_316 - DENDRO_61)) + DENDRO_739*gt3[pp] + DENDRO_799*(DENDRO_277 - DENDRO_386*(-DENDRO_309 + DENDRO_84)) + DENDRO_87*(DENDRO_101*DENDRO_26*DENDRO_399*DENDRO_760 + DENDRO_115*(DENDRO_408 - DENDRO_801) + DENDRO_115*(-DENDRO_117*DENDRO_154 + DENDRO_411) + DENDRO_115*(-DENDRO_173*DENDRO_218 + DENDRO_420) - DENDRO_117*DENDRO_280 + DENDRO_123*(DENDRO_412 - 1.0*DENDRO_800) - DENDRO_123*(DENDRO_427 - 1.0*DENDRO_429) - DENDRO_123*(DENDRO_430*DENDRO_752 + DENDRO_432) + DENDRO_129*(DENDRO_423 - DENDRO_425) + DENDRO_129*(DENDRO_130*DENDRO_754 + DENDRO_154*DENDRO_417) + DENDRO_129*(DENDRO_417*DENDRO_752 + DENDRO_802) + DENDRO_129*(DENDRO_55*DENDRO_803 + DENDRO_804) + DENDRO_134*(DENDRO_413 - 1.0*DENDRO_801) + DENDRO_134*(DENDRO_434 + DENDRO_761) + DENDRO_134*(DENDRO_436 + DENDRO_768) + DENDRO_154*DENDRO_401 + DENDRO_173*DENDRO_404 - DENDRO_207*(DENDRO_210*DENDRO_258 + DENDRO_212*DENDRO_241 + DENDRO_213*DENDRO_277 + DENDRO_437) - DENDRO_218*DENDRO_261 - DENDRO_233*DENDRO_244 - DENDRO_304*gt3[pp] + DENDRO_328*(DENDRO_406 - DENDRO_800) + DENDRO_328*(-DENDRO_117*DENDRO_754 + DENDRO_419) + DENDRO_328*(-DENDRO_218*DENDRO_752 + DENDRO_410) + DENDRO_398 + DENDRO_400*(DENDRO_229 + DENDRO_335) + DENDRO_403*DENDRO_754 + DENDRO_405*(DENDRO_139 + DENDRO_438))) + DENDRO_4*DENDRO_740 + DENDRO_741*DENDRO_796 - alpha[pp]*(-At3[pp]*K[pp] + DENDRO_20*DENDRO_746 + DENDRO_745*DENDRO_797 + DENDRO_747*DENDRO_798) + beta0[pp]*agrad_0_At3[pp] + beta1[pp]*agrad_1_At3[pp] + beta2[pp]*agrad_2_At3[pp]; At_rhs12[pp] = At1[pp]*DENDRO_773 + At2[pp]*DENDRO_740 + At3[pp]*DENDRO_774 - At4[pp]*DENDRO_795 + At5[pp]*DENDRO_741 + DENDRO_2*DENDRO_805 + DENDRO_3*DENDRO_805 + DENDRO_34*(-12*DENDRO_585 - DENDRO_586*(6.0*DENDRO_126*DENDRO_31 + 6.0*DENDRO_216*DENDRO_29 - 6.0*DENDRO_218*DENDRO_23 - 6.0*DENDRO_310*DENDRO_37*gt4[pp]) + DENDRO_739*gt4[pp] + DENDRO_748*(DENDRO_37*(DENDRO_470 + DENDRO_749*gt4[pp]) + DENDRO_589) - DENDRO_780*(-DENDRO_590 + DENDRO_591 + DENDRO_592 - DENDRO_594) + DENDRO_87*(DENDRO_101*DENDRO_29*(DENDRO_759 + DENDRO_781) + DENDRO_102*(DENDRO_632 - DENDRO_655) - DENDRO_102*(-DENDRO_160*DENDRO_754 + DENDRO_365*DENDRO_754 - DENDRO_417*DENDRO_791) - DENDRO_103*DENDRO_280 + DENDRO_123*(DENDRO_230*DENDRO_760 + DENDRO_671) + DENDRO_123*(-DENDRO_237*DENDRO_789 + DENDRO_808) + DENDRO_123*(DENDRO_336*DENDRO_752 + DENDRO_808) - DENDRO_123*(DENDRO_119*DENDRO_809 + DENDRO_136*DENDRO_791 + DENDRO_672) + DENDRO_123*(-DENDRO_126*DENDRO_809 - DENDRO_627*DENDRO_754 + DENDRO_649) - DENDRO_129*(DENDRO_183*DENDRO_336 + DENDRO_783 - DENDRO_806) - DENDRO_129*(-DENDRO_205*DENDRO_754 - DENDRO_55*DENDRO_791 + DENDRO_792) + DENDRO_129*(DENDRO_417*DENDRO_789 + DENDRO_782 + DENDRO_806) + DENDRO_134*(-DENDRO_183*DENDRO_324 + DENDRO_807) + DENDRO_134*(-DENDRO_531*DENDRO_760 + DENDRO_665) + DENDRO_134*(-DENDRO_627*DENDRO_752 + DENDRO_807) + DENDRO_134*(-DENDRO_124*DENDRO_147 + DENDRO_667 - 0.25*DENDRO_786) + DENDRO_134*(-DENDRO_132*DENDRO_754 + DENDRO_647 - DENDRO_804) + DENDRO_134*(-DENDRO_417*DENDRO_760 + DENDRO_518 + DENDRO_616) + DENDRO_177*(DENDRO_173*DENDRO_197 + DENDRO_764) + DENDRO_177*(DENDRO_180 + DENDRO_766 + DENDRO_794) - DENDRO_207*(DENDRO_221*DENDRO_770 + DENDRO_249*DENDRO_771 + DENDRO_267*DENDRO_772 + DENDRO_596) - DENDRO_227*DENDRO_261 - 
DENDRO_235*DENDRO_244 - DENDRO_304*gt4[pp] - DENDRO_323*(-DENDRO_324*DENDRO_752 + DENDRO_653) - DENDRO_323*(-DENDRO_124*DENDRO_754 + DENDRO_642 + DENDRO_673) - DENDRO_323*(-DENDRO_237*DENDRO_760 - DENDRO_238*DENDRO_760 + DENDRO_412) + DENDRO_328*(-DENDRO_216*DENDRO_760 + DENDRO_394) + DENDRO_600 + DENDRO_601 + DENDRO_603 + DENDRO_604 + DENDRO_605 + DENDRO_606 + DENDRO_607 + DENDRO_608 + DENDRO_609 + DENDRO_610 + DENDRO_611 + DENDRO_612 + DENDRO_613 + DENDRO_657 + DENDRO_662 + DENDRO_663 + DENDRO_669)) - alpha[pp]*(-At4[pp]*K[pp] + DENDRO_20*DENDRO_778 + DENDRO_777*DENDRO_797 + DENDRO_779*DENDRO_798) + beta0[pp]*agrad_0_At4[pp] + beta1[pp]*agrad_1_At4[pp] + beta2[pp]*agrad_2_At4[pp];
At_rhs22[pp] = (4.0L/3.0L)*At5[pp]*DENDRO_3 - At5[pp]*DENDRO_776 - At5[pp]*DENDRO_795 + DENDRO_34*(-12*DENDRO_305 + 12*DENDRO_306 - 12*DENDRO_59*(DENDRO_312 - DENDRO_313 + DENDRO_314 - DENDRO_318) + DENDRO_739*gt5[pp] - DENDRO_799*(-DENDRO_311 + DENDRO_791) + DENDRO_87*(DENDRO_110*DENDRO_183*DENDRO_340 - DENDRO_110*DENDRO_280 - DENDRO_123*(DENDRO_366 - 1.0*DENDRO_368) - DENDRO_123*(0.25*DENDRO_810 + 1.0*DENDRO_811) + DENDRO_128*(DENDRO_812 + DENDRO_813) + DENDRO_128*(DENDRO_110*DENDRO_147 + DENDRO_48*DENDRO_791) + DENDRO_129*(DENDRO_362 - 1.0*DENDRO_364) + DENDRO_129*(0.25*DENDRO_812 + 1.0*DENDRO_813) - DENDRO_129*(-DENDRO_370*DENDRO_791 + DENDRO_790) - DENDRO_134*(DENDRO_179*DENDRO_754 + 0.5*DENDRO_793) - DENDRO_134*(DENDRO_183*DENDRO_226 + DENDRO_806) - DENDRO_134*(DENDRO_197*DENDRO_752 + DENDRO_784) - DENDRO_134*(DENDRO_50*DENDRO_803 + 0.25*DENDRO_758) + DENDRO_147*DENDRO_343 + DENDRO_149*DENDRO_216*DENDRO_752 - DENDRO_207*(DENDRO_210*DENDRO_255 + DENDRO_212*DENDRO_231 + DENDRO_213*DENDRO_274 + DENDRO_371) - DENDRO_216*DENDRO_244 - DENDRO_224*DENDRO_261 - DENDRO_304*gt5[pp] - DENDRO_328*(DENDRO_810 + DENDRO_811) - DENDRO_328*(DENDRO_110*DENDRO_754 + DENDRO_126*DENDRO_791) + DENDRO_33*DENDRO_342*DENDRO_789 + DENDRO_338 + DENDRO_344*DENDRO_754 + DENDRO_345*(DENDRO_163 + DENDRO_375) + DENDRO_346*(DENDRO_229 + DENDRO_373))) + DENDRO_6*DENDRO_773 + DENDRO_774*DENDRO_796 - alpha[pp]*(At5[pp]*DENDRO_297*DENDRO_779 - At5[pp]*K[pp] + DENDRO_32*DENDRO_778 + DENDRO_777*DENDRO_798) + beta0[pp]*agrad_0_At5[pp] + beta1[pp]*agrad_1_At5[pp] + beta2[pp]*agrad_2_At5[pp];
// Dendro: reduced ops: 3569
// Dendro: }}}
} //loop z end
}// end of the if for the thread idx
__syncthreads();
// store computed variables
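/*
 * The __storeSharedToGlobal3D calls below copy each shared-memory result tile back into the
 * unzipped output block for this dendro block.  A minimal sketch of the assumed mapping from a
 * tile-local index to the padded global block index (illustration only; the actual helper lives
 * in the cuda utilities):
 *
 *   // for every (i,j,k) of the tile owned by this thread block:
 *   //   out[(ijk_lm[4]+k)*alignedSz[1]*alignedSz[0] + (ijk_lm[2]+j)*alignedSz[0] + (ijk_lm[0]+i)]
 *   //       = tile[k*tile_sz[1]*tile_sz[0] + j*tile_sz[0] + i];
 */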
cuda::__storeSharedToGlobal3D<double>(At_rhs12, &__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
cuda::__storeSharedToGlobal3D<double>(At_rhs11, &__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
cuda::__storeSharedToGlobal3D<double>(At_rhs22, &__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
cuda::__storeSharedToGlobal3D<double>(At_rhs02, &__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
cuda::__storeSharedToGlobal3D<double>(At_rhs00, &__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
cuda::__storeSharedToGlobal3D<double>(At_rhs01, &__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz);
__syncthreads();
} // end of block assigned to gpu block loop x
} // end of block assigned to gpu block loop y
} // end of block assigned to gpu block loop z
} // end of function __compute_At_rhs
/**@brief compute K_rhs
@param[in] __unzipInVar: unzipped input array (global memory)
@param[in] MemoryDerivs: allocated workspace for derivative computations
@param[in] __dendroBlkList: dendro block list
@param[in] __gpuBlockMap: gpu block map
@param[in] __deviceProperties: cuda device properties
@param[out] __unzipOutVar: unzipped output computed rhs
*/
__device__ void __compute_K_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// generated code for K_rhs begin
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// bssn compute parameters
const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]};
const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]};
const double kosigma=__bssnParams->KO_DISS_SIGMA;
const double ETA_R0=__bssnParams->ETA_R0;
const double R0=__bssnParams->ETA_R0;
const double ETA_DAMPING=__bssnParams->ETA_DAMPING;
const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP;
const double ETA_CONST=__bssnParams->ETA_CONST;
const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]};
const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount;
const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS;
const unsigned int offset=dblock->getOffset();
const unsigned int *sz=dblock->getSz();
const unsigned int *alignedSz=dblock->getAlignedSz();
const double* hx=dblock->getDx();
const double dx=hx[0];
const double dy=hx[1];
const double dz=hx[2];
const double* ptmin=dblock->getPtMin();
const double* ptmax=dblock->getPtMax();
const unsigned int bflag=dblock->getBFlag();
const unsigned int tile_sz[3]={4,4,4};
//input vars begin
double * K = __sm_base + 0;
double * gt1 = __sm_base + 64;
double * beta1 = __sm_base + 128;
double * gt3 = __sm_base + 192;
double * At1 = __sm_base + 256;
double * gt5 = __sm_base + 320;
double * alpha = __sm_base + 384;
double * gt4 = __sm_base + 448;
double * gt2 = __sm_base + 512;
double * At3 = __sm_base + 576;
double * beta2 = __sm_base + 640;
double * At4 = __sm_base + 704;
double * At0 = __sm_base + 768;
double * At2 = __sm_base + 832;
double * beta0 = __sm_base + 896;
double * gt0 = __sm_base + 960;
double * chi = __sm_base + 1024;
double * At5 = __sm_base + 1088;
//input vars end
// staged vars begin
// staged vars end
// deriv vars begin
double * grad_0_gt5 = __sm_base + 1152;
double * grad_1_gt0 = __sm_base + 1216;
double * grad2_0_1_alpha = __sm_base + 1280;
double * grad2_2_2_alpha = __sm_base + 1344;
double * grad_2_gt0 = __sm_base + 1408;
double * grad_0_gt4 = __sm_base + 1472;
double * grad_2_gt3 = __sm_base + 1536;
double * grad_1_alpha = __sm_base + 1600;
double * grad_2_alpha = __sm_base + 1664;
double * grad2_1_1_alpha = __sm_base + 1728;
double * grad_1_gt5 = __sm_base + 1792;
double * grad_0_gt1 = __sm_base + 1856;
double * grad_1_gt4 = __sm_base + 1920;
double * agrad_2_K = __sm_base + 1984;
double * grad_1_gt1 = __sm_base + 2048;
double * grad_2_gt4 = __sm_base + 2112;
double * grad_0_alpha = __sm_base + 2176;
double * grad_0_chi = __sm_base + 2240;
double * grad2_0_0_alpha = __sm_base + 2304;
double * agrad_1_K = __sm_base + 2368;
double * grad_2_gt2 = __sm_base + 2432;
double * grad_1_chi = __sm_base + 2496;
double * grad_0_gt0 = __sm_base + 2560;
double * grad_0_gt3 = __sm_base + 2624;
double * grad2_1_2_alpha = __sm_base + 2688;
double * grad_2_gt5 = __sm_base + 2752;
double * agrad_0_K = __sm_base + 2816;
double * grad_1_gt3 = __sm_base + 2880;
double * grad_2_chi = __sm_base + 2944;
double * grad_2_gt1 = __sm_base + 3008;
double * grad_0_gt2 = __sm_base + 3072;
double * grad2_0_2_alpha = __sm_base + 3136;
double * grad_1_gt2 = __sm_base + 3200;
// deriv vars end
// output vars begin
double * K_rhs = __sm_base + 3264;
// output vars end
const unsigned int Lb = 3;// load begin bound
const unsigned int Le = sz[0]-3;// load end bound
//!! Note that we assume tile sizes are cubic.
const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1;
const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X;
const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X;
unsigned int ijk_lm[3*2];
unsigned int tile_lm[3*2];
for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){
ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0));
ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1);
for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){
ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0));
ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1);
for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){
ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0));
ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1);
tile_lm[0]=0;
tile_lm[1]=ijk_lm[1] - ijk_lm[0];
tile_lm[2]=0;
tile_lm[3]=ijk_lm[3] - ijk_lm[2];
tile_lm[4]=0;
tile_lm[5]=ijk_lm[5] - ijk_lm[4];
//if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0)
//printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]);
//load data from global to shared memory
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_K][offset],(double *) K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz);
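/*
 * The remaining __loadGlobalToShared3D calls stage the rest of the unzipped input fields and the
 * precomputed derivative buffers into shared memory for this tile.  For the derivative loads
 * further below, the base pointer offset
 *     stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz)
 * appears to select the scratch region owned by the current stream and SM, so blocks running
 * concurrently on different SMs (or streams) presumably never alias each other's derivative data.
 */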
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_alpha,(const unsigned int *) ijk_lm,(const unsigned 
int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 3960 // Dendro: printing temp variables const double DENDRO_0 = pow(gt4[pp], 2); const double DENDRO_1 = DENDRO_0*gt0[pp]; const double DENDRO_2 = pow(gt1[pp], 2); const double DENDRO_3 = DENDRO_2*gt5[pp]; const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = DENDRO_4*gt3[pp]; const double DENDRO_6 = gt0[pp]*gt3[pp]; const double DENDRO_7 = DENDRO_6*gt5[pp]; const double DENDRO_8 = gt1[pp]*gt2[pp]; const double DENDRO_9 = 2*DENDRO_8*gt4[pp]; const double DENDRO_10 = DENDRO_1 + DENDRO_3 + DENDRO_5 - DENDRO_7 - DENDRO_9; const double DENDRO_11 = 1.0/DENDRO_10; const double DENDRO_12 = DENDRO_11*chi[pp]; const double DENDRO_13 = -DENDRO_2 + DENDRO_6; const double DENDRO_14 = grad_1_alpha[pp]; const double DENDRO_15 = 1.0/(-DENDRO_1 - DENDRO_3 - DENDRO_5 + DENDRO_7 + DENDRO_9); const double DENDRO_16 = DENDRO_14*DENDRO_15; const double DENDRO_17 = grad_2_gt5[pp]; const double DENDRO_18 = -0.5*gt0[pp]*gt4[pp] + 0.5*gt1[pp]*gt2[pp]; const double DENDRO_19 = gt2[pp]*gt4[pp]; const double DENDRO_20 = gt1[pp]*gt5[pp]; const double DENDRO_21 = DENDRO_19 - DENDRO_20; const double DENDRO_22 = grad_0_gt5[pp]; const double DENDRO_23 = -0.5*DENDRO_22 + 1.0*grad_2_gt2[pp]; const double DENDRO_24 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_25 = grad_1_gt5[pp]; const double DENDRO_26 = -0.5*DENDRO_25 + 1.0*grad_2_gt4[pp]; const double DENDRO_27 = 0.5*gt5[pp]; const double DENDRO_28 = 1.0/chi[pp]; const double DENDRO_29 = grad_2_chi[pp]; const double DENDRO_30 = gt0[pp]*gt4[pp]; const double DENDRO_31 = -DENDRO_30 + DENDRO_8; const double DENDRO_32 = grad_0_chi[pp]; const double DENDRO_33 = grad_1_chi[pp]; const double DENDRO_34 = DENDRO_21*DENDRO_32 + DENDRO_24*DENDRO_33 + DENDRO_29*DENDRO_31; const double DENDRO_35 = DENDRO_28*DENDRO_34; const double DENDRO_36 = grad_0_alpha[pp]; const double DENDRO_37 = DENDRO_15*DENDRO_36; const double DENDRO_38 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_39 = -DENDRO_0 + gt3[pp]*gt5[pp]; const double 
DENDRO_40 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_41 = DENDRO_21*DENDRO_33 + DENDRO_29*DENDRO_40 + DENDRO_32*DENDRO_39; const double DENDRO_42 = DENDRO_28*DENDRO_41; const double DENDRO_43 = grad_2_alpha[pp]; const double DENDRO_44 = DENDRO_13*DENDRO_15; const double DENDRO_45 = DENDRO_15*DENDRO_31; const double DENDRO_46 = DENDRO_15*DENDRO_40; const double DENDRO_47 = DENDRO_13*DENDRO_29 + DENDRO_31*DENDRO_33 + DENDRO_32*DENDRO_40; const double DENDRO_48 = DENDRO_15*DENDRO_43; const double DENDRO_49 = grad_1_gt3[pp]; const double DENDRO_50 = grad_0_gt3[pp]; const double DENDRO_51 = -0.5*DENDRO_50 + 1.0*grad_1_gt1[pp]; const double DENDRO_52 = grad_2_gt3[pp]; const double DENDRO_53 = -0.5*DENDRO_52 + 1.0*grad_1_gt4[pp]; const double DENDRO_54 = 0.5*gt3[pp]; const double DENDRO_55 = DENDRO_28*DENDRO_47; const double DENDRO_56 = -0.5*gt1[pp]*gt5[pp] + 0.5*gt2[pp]*gt4[pp]; const double DENDRO_57 = DENDRO_15*DENDRO_24; const double DENDRO_58 = DENDRO_15*DENDRO_21; const double DENDRO_59 = grad_0_gt0[pp]; const double DENDRO_60 = grad_1_gt0[pp]; const double DENDRO_61 = -0.5*DENDRO_60 + 1.0*grad_0_gt1[pp]; const double DENDRO_62 = grad_2_gt0[pp]; const double DENDRO_63 = -0.5*DENDRO_62 + 1.0*grad_0_gt2[pp]; const double DENDRO_64 = 0.5*gt0[pp]; const double DENDRO_65 = DENDRO_15*DENDRO_39; const double DENDRO_66 = 2*DENDRO_11*chi[pp]; const double DENDRO_67 = DENDRO_30 - DENDRO_8; const double DENDRO_68 = 0.5*DENDRO_15; const double DENDRO_69 = grad_1_gt2[pp]; const double DENDRO_70 = grad_2_gt1[pp]; const double DENDRO_71 = grad_0_gt4[pp]; const double DENDRO_72 = DENDRO_69 + DENDRO_70 - DENDRO_71; const double DENDRO_73 = 0.5*DENDRO_43; const double DENDRO_74 = DENDRO_15*gt4[pp]; const double DENDRO_75 = 0.5*DENDRO_14; const double DENDRO_76 = -DENDRO_69 + DENDRO_70 + DENDRO_71; const double DENDRO_77 = DENDRO_15*gt2[pp]; const double DENDRO_78 = 0.5*DENDRO_36; const double DENDRO_79 = -DENDRO_19 + DENDRO_20; const double DENDRO_80 = DENDRO_69 - DENDRO_70 + DENDRO_71; const double DENDRO_81 = DENDRO_15*gt1[pp]; const double DENDRO_82 = pow(DENDRO_10, -2); const double DENDRO_83 = 3*DENDRO_82; const double DENDRO_84 = pow(DENDRO_79, 2); const double DENDRO_85 = pow(DENDRO_40, 2); const double DENDRO_86 = DENDRO_40*DENDRO_79; const double DENDRO_87 = 2*At1[pp]*DENDRO_79; const double DENDRO_88 = 2*At2[pp]*DENDRO_40; const double DENDRO_89 = pow(DENDRO_67, 2); const double DENDRO_90 = DENDRO_67*DENDRO_79; const double DENDRO_91 = 2*At4[pp]*DENDRO_67; const double DENDRO_92 = DENDRO_40*DENDRO_67; const double DENDRO_93 = 6*DENDRO_82; const double DENDRO_94 = At0[pp]*DENDRO_39; const double DENDRO_95 = At5[pp]*DENDRO_13; const double DENDRO_96 = DENDRO_39*DENDRO_67; const double DENDRO_97 = DENDRO_13*DENDRO_79; const double DENDRO_98 = DENDRO_24*DENDRO_40; const double DENDRO_99 = At3[pp]*DENDRO_24; // Dendro: printing variables K_rhs[pp] = -DENDRO_12*DENDRO_13*(DENDRO_16*(DENDRO_17*DENDRO_18 + DENDRO_21*DENDRO_23 + DENDRO_24*DENDRO_26 + DENDRO_27*DENDRO_35) + DENDRO_37*(DENDRO_17*DENDRO_38 + DENDRO_21*DENDRO_26 + DENDRO_23*DENDRO_39 + DENDRO_27*DENDRO_42) + DENDRO_43*(0.5*DENDRO_17*DENDRO_44 + DENDRO_23*DENDRO_46 + DENDRO_26*DENDRO_45 - DENDRO_28*(-DENDRO_15*DENDRO_27*DENDRO_47 + 1.0*DENDRO_29)) - grad2_2_2_alpha[pp]) - DENDRO_12*DENDRO_24*(DENDRO_14*(-DENDRO_28*(-DENDRO_15*DENDRO_34*DENDRO_54 + 1.0*DENDRO_33) + DENDRO_45*DENDRO_53 + 0.5*DENDRO_49*DENDRO_57 + DENDRO_51*DENDRO_58) + DENDRO_37*(DENDRO_39*DENDRO_51 + DENDRO_40*DENDRO_53 + DENDRO_42*DENDRO_54 + DENDRO_49*DENDRO_56) + 
DENDRO_48*(DENDRO_13*DENDRO_53 + DENDRO_18*DENDRO_49 + DENDRO_40*DENDRO_51 + DENDRO_54*DENDRO_55) - grad2_1_1_alpha[pp]) - DENDRO_12*DENDRO_39*(DENDRO_16*(DENDRO_24*DENDRO_61 + DENDRO_31*DENDRO_63 + DENDRO_35*DENDRO_64 + DENDRO_56*DENDRO_59) + DENDRO_36*(-DENDRO_28*(-DENDRO_15*DENDRO_41*DENDRO_64 + 1.0*DENDRO_32) + DENDRO_46*DENDRO_63 + DENDRO_58*DENDRO_61 + 0.5*DENDRO_59*DENDRO_65) + DENDRO_48*(DENDRO_13*DENDRO_63 + DENDRO_31*DENDRO_61 + DENDRO_38*DENDRO_59 + DENDRO_55*DENDRO_64) - grad2_0_0_alpha[pp]) - DENDRO_40*DENDRO_66*(DENDRO_14*DENDRO_68*(DENDRO_21*DENDRO_62 + DENDRO_22*DENDRO_31 + DENDRO_24*DENDRO_76 + DENDRO_35*gt2[pp]) + DENDRO_73*(DENDRO_22*DENDRO_44 - DENDRO_28*(DENDRO_32 - DENDRO_47*DENDRO_77) + DENDRO_45*DENDRO_76 + DENDRO_46*DENDRO_62) + DENDRO_78*(DENDRO_22*DENDRO_46 - DENDRO_28*(DENDRO_29 - DENDRO_41*DENDRO_77) + DENDRO_58*DENDRO_76 + DENDRO_62*DENDRO_65) - grad2_0_2_alpha[pp]) + DENDRO_66*DENDRO_67*(DENDRO_36*DENDRO_68*(DENDRO_21*DENDRO_52 + DENDRO_25*DENDRO_40 + DENDRO_39*DENDRO_72 + DENDRO_42*gt4[pp]) + DENDRO_73*(DENDRO_25*DENDRO_44 - DENDRO_28*(DENDRO_33 - DENDRO_47*DENDRO_74) + DENDRO_45*DENDRO_52 + DENDRO_46*DENDRO_72) + DENDRO_75*(DENDRO_25*DENDRO_45 - DENDRO_28*(DENDRO_29 - DENDRO_34*DENDRO_74) + DENDRO_52*DENDRO_57 + DENDRO_58*DENDRO_72) - grad2_1_2_alpha[pp]) + DENDRO_66*DENDRO_79*(DENDRO_43*DENDRO_68*(DENDRO_13*DENDRO_80 + DENDRO_31*DENDRO_50 + DENDRO_40*DENDRO_60 + DENDRO_55*gt1[pp]) + DENDRO_75*(-DENDRO_28*(DENDRO_32 - DENDRO_34*DENDRO_81) + DENDRO_45*DENDRO_80 + DENDRO_50*DENDRO_57 + DENDRO_58*DENDRO_60) + DENDRO_78*(-DENDRO_28*(DENDRO_33 - DENDRO_41*DENDRO_81) + DENDRO_46*DENDRO_80 + DENDRO_50*DENDRO_58 + DENDRO_60*DENDRO_65) - grad2_0_1_alpha[pp]) + (1.0L/3.0L)*alpha[pp]*(At0[pp]*DENDRO_83*(At0[pp]*pow(DENDRO_39, 2) + At3[pp]*DENDRO_84 - 2*At4[pp]*DENDRO_86 + At5[pp]*DENDRO_85 - DENDRO_39*DENDRO_87 + DENDRO_39*DENDRO_88) + At1[pp]*DENDRO_93*(At1[pp]*DENDRO_24*DENDRO_39 + At1[pp]*DENDRO_84 - At2[pp]*DENDRO_86 - At2[pp]*DENDRO_96 + At4[pp]*DENDRO_90 + At4[pp]*DENDRO_98 - At5[pp]*DENDRO_92 - DENDRO_79*DENDRO_94 - DENDRO_79*DENDRO_99) + At2[pp]*DENDRO_93*(-At1[pp]*DENDRO_86 - At1[pp]*DENDRO_96 + At2[pp]*DENDRO_13*DENDRO_39 + At2[pp]*DENDRO_85 + At3[pp]*DENDRO_90 - At4[pp]*DENDRO_92 - At4[pp]*DENDRO_97 + DENDRO_40*DENDRO_94 + DENDRO_40*DENDRO_95) + At3[pp]*DENDRO_83*(At0[pp]*DENDRO_84 + 2*At2[pp]*DENDRO_90 + At3[pp]*pow(DENDRO_24, 2) + At5[pp]*DENDRO_89 - DENDRO_24*DENDRO_87 - DENDRO_24*DENDRO_91) + At4[pp]*DENDRO_93*(-At0[pp]*DENDRO_86 + At1[pp]*DENDRO_90 + At1[pp]*DENDRO_98 - At2[pp]*DENDRO_92 - At2[pp]*DENDRO_97 + At4[pp]*DENDRO_13*DENDRO_24 + At4[pp]*DENDRO_89 - DENDRO_67*DENDRO_95 - DENDRO_67*DENDRO_99) + At5[pp]*DENDRO_83*(At0[pp]*DENDRO_85 - 2*At1[pp]*DENDRO_92 + At3[pp]*DENDRO_89 + At5[pp]*pow(DENDRO_13, 2) + DENDRO_13*DENDRO_88 - DENDRO_13*DENDRO_91) + pow(K[pp], 2)) + beta0[pp]*agrad_0_K[pp] + beta1[pp]*agrad_1_K[pp] + beta2[pp]*agrad_2_K[pp]; // Dendro: reduced ops: 501 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(K_rhs, &__unzipOutVar[cuda::VAR::U_K][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_K_rhs /**@brief compute Gt_rhs @param[in] __unzipInVar: unzipped input array 
(global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_Gt_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for Gt_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 27; double * gt3 = __sm_base + 54; double * At1 = __sm_base + 81; double * gt5 = __sm_base + 108; double * alpha = __sm_base + 135; double * gt4 = __sm_base + 162; double * gt2 = __sm_base + 189; double * At3 = __sm_base + 216; double * beta2 = __sm_base + 243; double * At4 = __sm_base + 270; double * At0 = __sm_base + 297; double * At2 = __sm_base + 324; double * beta0 = __sm_base + 351; double * gt0 = __sm_base + 378; double * chi = __sm_base + 405; double * At5 = __sm_base + 432; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad_0_gt5 = __sm_base + 459; double * grad_1_gt0 = __sm_base + 486; double * grad2_1_2_beta2 = __sm_base + 513; double * grad_2_K = __sm_base + 540; double * grad_0_beta0 = __sm_base + 567; double * grad_2_gt0 = __sm_base + 594; double * grad_0_gt4 = __sm_base + 621; double * grad2_1_1_beta1 = __sm_base + 648; double * grad_2_gt3 = __sm_base + 675; double * grad2_0_1_beta0 = __sm_base + 702; double * grad2_0_0_beta1 = __sm_base + 729; double * grad_1_K = __sm_base + 756; double * grad_1_alpha = __sm_base + 783; double * grad2_0_2_beta1 = __sm_base + 810; double * grad2_0_1_beta1 = __sm_base + 837; double * grad2_2_2_beta2 = __sm_base + 864; double * grad_1_beta1 = __sm_base + 891; double * grad_2_alpha = __sm_base + 918; double * grad_2_beta0 = __sm_base + 945; double * grad_0_gt1 = 
__sm_base + 972; double * grad_1_gt5 = __sm_base + 999; double * agrad_2_Gt1 = __sm_base + 1026; double * agrad_0_Gt2 = __sm_base + 1053; double * grad2_1_1_beta2 = __sm_base + 1080; double * grad_1_gt4 = __sm_base + 1107; double * grad2_2_2_beta1 = __sm_base + 1134; double * grad_1_gt1 = __sm_base + 1161; double * grad_2_gt4 = __sm_base + 1188; double * grad_0_beta1 = __sm_base + 1215; double * grad_0_alpha = __sm_base + 1242; double * grad_0_chi = __sm_base + 1269; double * grad_2_beta2 = __sm_base + 1296; double * grad2_1_2_beta0 = __sm_base + 1323; double * grad2_1_1_beta0 = __sm_base + 1350; double * agrad_0_Gt1 = __sm_base + 1377; double * grad_0_K = __sm_base + 1404; double * grad2_0_2_beta2 = __sm_base + 1431; double * agrad_0_Gt0 = __sm_base + 1458; double * agrad_1_Gt1 = __sm_base + 1485; double * grad2_0_0_beta0 = __sm_base + 1512; double * agrad_1_Gt2 = __sm_base + 1539; double * agrad_1_Gt0 = __sm_base + 1566; double * grad_2_gt2 = __sm_base + 1593; double * grad_1_chi = __sm_base + 1620; double * grad_0_gt0 = __sm_base + 1647; double * grad_0_gt3 = __sm_base + 1674; double * grad2_2_2_beta0 = __sm_base + 1701; double * agrad_2_Gt2 = __sm_base + 1728; double * grad_2_beta1 = __sm_base + 1755; double * grad_2_gt5 = __sm_base + 1782; double * grad_1_beta0 = __sm_base + 1809; double * grad2_0_1_beta2 = __sm_base + 1836; double * grad_1_gt3 = __sm_base + 1863; double * grad2_0_2_beta0 = __sm_base + 1890; double * grad_2_chi = __sm_base + 1917; double * grad_2_gt1 = __sm_base + 1944; double * agrad_2_Gt0 = __sm_base + 1971; double * grad_0_gt2 = __sm_base + 1998; double * grad_0_beta2 = __sm_base + 2025; double * grad_1_beta2 = __sm_base + 2052; double * grad2_0_0_beta2 = __sm_base + 2079; double * grad_1_gt2 = __sm_base + 2106; double * grad2_1_2_beta1 = __sm_base + 2133; // deriv vars end // output vars begin double * Gt_rhs0 = __sm_base + 2160; double * Gt_rhs2 = __sm_base + 2187; double * Gt_rhs1 = __sm_base + 2214; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
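// The remaining evolved-variable tiles (At2, beta0, gt0, chi, At5) are staged next, followed by the
// precomputed derivative tiles read back from the per-stream derivative workspace.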
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
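// Derivative tiles are read from __derivWorkspace at offset (stream_id*__szPerStream + SM_ID*__maxBlkSz),
// i.e. the workspace slot owned by this stream on the SM executing this block (SM_ID from get_smid()).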
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt1,(const 
unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
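// agrad_* tiles hold the advective derivatives of Gt (presumably upwinded along the shift);
// they enter the RHS only through the beta^i * agrad_i Gt terms in the expressions below.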
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta0,(const unsigned int *) ijk_lm,(const 
unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 16710 // Dendro: printing temp variables const double DENDRO_0 = gt1[pp]*gt2[pp]; const double DENDRO_1 = -DENDRO_0 + gt0[pp]*gt4[pp]; const double DENDRO_2 = pow(gt4[pp], 2); const double DENDRO_3 = pow(gt1[pp], 2); const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = gt0[pp]*gt3[pp]; const double DENDRO_6 = -2*DENDRO_0*gt4[pp] + DENDRO_2*gt0[pp] + DENDRO_3*gt5[pp] + DENDRO_4*gt3[pp] - DENDRO_5*gt5[pp]; const double DENDRO_7 = 1.0/DENDRO_6; const double DENDRO_8 = grad2_0_2_beta0[pp]; const double DENDRO_9 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_10 = (7.0L/3.0L)*DENDRO_7*DENDRO_9; const double DENDRO_11 = grad2_1_2_beta1[pp]; const double DENDRO_12 = (1.0L/3.0L)*DENDRO_7*DENDRO_9; const double DENDRO_13 = grad2_2_2_beta2[pp]; const double DENDRO_14 = grad2_0_1_beta0[pp]; const double DENDRO_15 = gt1[pp]*gt5[pp] - gt2[pp]*gt4[pp]; const double DENDRO_16 = (7.0L/3.0L)*DENDRO_15*DENDRO_7; const double DENDRO_17 = grad2_1_1_beta1[pp]; const double DENDRO_18 = (1.0L/3.0L)*DENDRO_15*DENDRO_7; const double DENDRO_19 = grad2_1_2_beta2[pp]; const double DENDRO_20 = -DENDRO_3 + DENDRO_5; const double DENDRO_21 = DENDRO_20*DENDRO_7; const double DENDRO_22 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_23 = DENDRO_22*DENDRO_7; const double DENDRO_24 = grad2_0_0_beta0[pp]; const double DENDRO_25 = -DENDRO_2 + gt3[pp]*gt5[pp]; const double DENDRO_26 = DENDRO_25*DENDRO_7; const double DENDRO_27 = grad2_0_1_beta1[pp]; const double DENDRO_28 = (1.0L/3.0L)*DENDRO_25*DENDRO_7; const double DENDRO_29 = grad2_0_2_beta2[pp]; const double DENDRO_30 = pow(DENDRO_6, -2); const double DENDRO_31 = 2*DENDRO_30*grad_0_alpha[pp]; const double DENDRO_32 = pow(DENDRO_15, 2); const double DENDRO_33 = pow(DENDRO_9, 2); const double DENDRO_34 = DENDRO_15*DENDRO_9; const double DENDRO_35 = 2*At1[pp]*DENDRO_15; const double DENDRO_36 = 2*At2[pp]*DENDRO_9; const double DENDRO_37 = At0[pp]*pow(DENDRO_25, 2) + At3[pp]*DENDRO_32 - 2*At4[pp]*DENDRO_34 + 
At5[pp]*DENDRO_33 - DENDRO_25*DENDRO_35 + DENDRO_25*DENDRO_36; const double DENDRO_38 = (1.0L/3.0L)*DENDRO_7*alpha[pp]; const double DENDRO_39 = grad_0_K[pp]; const double DENDRO_40 = 1.0/chi[pp]; const double DENDRO_41 = 9*DENDRO_40*DENDRO_7*grad_0_chi[pp]; const double DENDRO_42 = grad_0_gt0[pp]; const double DENDRO_43 = grad_1_gt0[pp]; const double DENDRO_44 = -0.5*DENDRO_43 + 1.0*grad_0_gt1[pp]; const double DENDRO_45 = grad_2_gt0[pp]; const double DENDRO_46 = -0.5*DENDRO_45 + 1.0*grad_0_gt2[pp]; const double DENDRO_47 = DENDRO_15*DENDRO_44 - 0.5*DENDRO_25*DENDRO_42 - DENDRO_46*DENDRO_9; const double DENDRO_48 = pow(DENDRO_6, -3); const double DENDRO_49 = 2*DENDRO_37*DENDRO_48*alpha[pp]; const double DENDRO_50 = grad_1_gt3[pp]; const double DENDRO_51 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_52 = grad_2_gt3[pp]; const double DENDRO_53 = -0.5*DENDRO_52 + 1.0*grad_1_gt4[pp]; const double DENDRO_54 = grad_0_gt3[pp]; const double DENDRO_55 = 0.5*DENDRO_54 - 1.0*grad_1_gt1[pp]; const double DENDRO_56 = DENDRO_25*DENDRO_55 + DENDRO_50*DENDRO_51 - DENDRO_53*DENDRO_9; const double DENDRO_57 = pow(DENDRO_1, 2); const double DENDRO_58 = DENDRO_1*DENDRO_15; const double DENDRO_59 = 2*At4[pp]*DENDRO_1; const double DENDRO_60 = At0[pp]*DENDRO_32 + 2*At2[pp]*DENDRO_58 + At3[pp]*pow(DENDRO_22, 2) + At5[pp]*DENDRO_57 - DENDRO_22*DENDRO_35 - DENDRO_22*DENDRO_59; const double DENDRO_61 = 2*DENDRO_48*DENDRO_60*alpha[pp]; const double DENDRO_62 = grad_2_gt5[pp]; const double DENDRO_63 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_64 = grad_1_gt5[pp]; const double DENDRO_65 = 0.5*DENDRO_64 - 1.0*grad_2_gt4[pp]; const double DENDRO_66 = grad_0_gt5[pp]; const double DENDRO_67 = 0.5*DENDRO_66 - 1.0*grad_2_gt2[pp]; const double DENDRO_68 = -DENDRO_15*DENDRO_65 + DENDRO_25*DENDRO_67 - DENDRO_62*DENDRO_63; const double DENDRO_69 = DENDRO_1*DENDRO_9; const double DENDRO_70 = At0[pp]*DENDRO_33 - 2*At1[pp]*DENDRO_69 + At3[pp]*DENDRO_57 + At5[pp]*pow(DENDRO_20, 2) + DENDRO_20*DENDRO_36 - DENDRO_20*DENDRO_59; const double DENDRO_71 = 2*DENDRO_48*DENDRO_70*alpha[pp]; const double DENDRO_72 = 2*DENDRO_30*grad_2_alpha[pp]; const double DENDRO_73 = At0[pp]*DENDRO_25; const double DENDRO_74 = At5[pp]*DENDRO_20; const double DENDRO_75 = DENDRO_1*DENDRO_25; const double DENDRO_76 = DENDRO_15*DENDRO_20; const double DENDRO_77 = -At1[pp]*DENDRO_34 - At1[pp]*DENDRO_75 + At2[pp]*DENDRO_20*DENDRO_25 + At2[pp]*DENDRO_33 + At3[pp]*DENDRO_58 - At4[pp]*DENDRO_69 - At4[pp]*DENDRO_76 + DENDRO_73*DENDRO_9 + DENDRO_74*DENDRO_9; const double DENDRO_78 = 2*DENDRO_30*grad_1_alpha[pp]; const double DENDRO_79 = At1[pp]*DENDRO_32; const double DENDRO_80 = At4[pp]*DENDRO_58; const double DENDRO_81 = At2[pp]*DENDRO_34; const double DENDRO_82 = DENDRO_22*DENDRO_9; const double DENDRO_83 = At4[pp]*DENDRO_82; const double DENDRO_84 = At5[pp]*DENDRO_69; const double DENDRO_85 = DENDRO_15*DENDRO_73; const double DENDRO_86 = At1[pp]*DENDRO_22*DENDRO_25; const double DENDRO_87 = At2[pp]*DENDRO_75; const double DENDRO_88 = At3[pp]*DENDRO_22; const double DENDRO_89 = DENDRO_15*DENDRO_88; const double DENDRO_90 = DENDRO_79 + DENDRO_80 - DENDRO_81 + DENDRO_83 - DENDRO_84 - DENDRO_85 + DENDRO_86 - DENDRO_87 - DENDRO_89; const double DENDRO_91 = grad_0_gt4[pp]; const double DENDRO_92 = grad_2_gt1[pp]; const double DENDRO_93 = grad_1_gt2[pp]; const double DENDRO_94 = DENDRO_91 + DENDRO_92 - DENDRO_93; const double DENDRO_95 = DENDRO_15*DENDRO_94 - DENDRO_25*DENDRO_45 - DENDRO_66*DENDRO_9; const double DENDRO_96 = 
2.0*DENDRO_48*DENDRO_77*alpha[pp]; const double DENDRO_97 = grad_2_K[pp]; const double DENDRO_98 = 4*gt1[pp]*gt4[pp] - 4*gt2[pp]*gt3[pp]; const double DENDRO_99 = 9*DENDRO_40*DENDRO_7*grad_2_chi[pp]; const double DENDRO_100 = DENDRO_91 - DENDRO_92 + DENDRO_93; const double DENDRO_101 = -DENDRO_100*DENDRO_9 + DENDRO_15*DENDRO_54 - DENDRO_25*DENDRO_43; const double DENDRO_102 = 2.0*DENDRO_48*DENDRO_90*alpha[pp]; const double DENDRO_103 = -DENDRO_91 + DENDRO_92 + DENDRO_93; const double DENDRO_104 = -DENDRO_103*DENDRO_25 + DENDRO_15*DENDRO_52 - DENDRO_64*DENDRO_9; const double DENDRO_105 = At4[pp]*DENDRO_57; const double DENDRO_106 = At1[pp]*DENDRO_58; const double DENDRO_107 = At0[pp]*DENDRO_34; const double DENDRO_108 = At1[pp]*DENDRO_82; const double DENDRO_109 = At2[pp]*DENDRO_69; const double DENDRO_110 = At2[pp]*DENDRO_76; const double DENDRO_111 = DENDRO_1*DENDRO_88; const double DENDRO_112 = At4[pp]*DENDRO_20*DENDRO_22; const double DENDRO_113 = DENDRO_1*DENDRO_74; const double DENDRO_114 = DENDRO_105 + DENDRO_106 - DENDRO_107 + DENDRO_108 - DENDRO_109 - DENDRO_110 - DENDRO_111 + DENDRO_112 - DENDRO_113; const double DENDRO_115 = 2.0*DENDRO_114*DENDRO_48*alpha[pp]; const double DENDRO_116 = grad_1_K[pp]; const double DENDRO_117 = 4*gt1[pp]*gt5[pp] - 4*gt2[pp]*gt4[pp]; const double DENDRO_118 = 9*DENDRO_40*DENDRO_7*grad_1_chi[pp]; const double DENDRO_119 = DENDRO_1*DENDRO_64 + DENDRO_103*DENDRO_15 - DENDRO_22*DENDRO_52; const double DENDRO_120 = DENDRO_1*DENDRO_119; const double DENDRO_121 = DENDRO_1*DENDRO_100 + DENDRO_15*DENDRO_43 - DENDRO_22*DENDRO_54; const double DENDRO_122 = DENDRO_121*DENDRO_15; const double DENDRO_123 = DENDRO_1*DENDRO_66 + DENDRO_15*DENDRO_45 - DENDRO_22*DENDRO_94; const double DENDRO_124 = DENDRO_123*DENDRO_9; const double DENDRO_125 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_126 = DENDRO_125*DENDRO_62 - DENDRO_15*DENDRO_67 + DENDRO_22*DENDRO_65; const double DENDRO_127 = DENDRO_126*DENDRO_20; const double DENDRO_128 = 0.5*DENDRO_22*DENDRO_50; const double DENDRO_129 = DENDRO_1*DENDRO_53; const double DENDRO_130 = DENDRO_15*DENDRO_55; const double DENDRO_131 = DENDRO_22*(-DENDRO_128 + DENDRO_129 - DENDRO_130); const double DENDRO_132 = DENDRO_1*DENDRO_46 - DENDRO_22*DENDRO_44 + DENDRO_42*DENDRO_51; const double DENDRO_133 = DENDRO_132*DENDRO_25; const double DENDRO_134 = DENDRO_30*(DENDRO_120 + DENDRO_122 - 1.0*DENDRO_124 - DENDRO_127 - DENDRO_131 - DENDRO_133); const double DENDRO_135 = DENDRO_1*DENDRO_52 - DENDRO_103*DENDRO_9 - DENDRO_20*DENDRO_64; const double DENDRO_136 = DENDRO_1*DENDRO_135; const double DENDRO_137 = DENDRO_1*DENDRO_54 - DENDRO_100*DENDRO_20 - DENDRO_43*DENDRO_9; const double DENDRO_138 = DENDRO_137*DENDRO_15; const double DENDRO_139 = DENDRO_1*DENDRO_94 - DENDRO_20*DENDRO_66 - DENDRO_45*DENDRO_9; const double DENDRO_140 = DENDRO_139*DENDRO_9; const double DENDRO_141 = -DENDRO_1*DENDRO_65 - 0.5*DENDRO_20*DENDRO_62 + DENDRO_67*DENDRO_9; const double DENDRO_142 = DENDRO_141*DENDRO_20; const double DENDRO_143 = DENDRO_125*DENDRO_50 - DENDRO_20*DENDRO_53 + DENDRO_55*DENDRO_9; const double DENDRO_144 = DENDRO_143*DENDRO_22; const double DENDRO_145 = DENDRO_1*DENDRO_44 - DENDRO_20*DENDRO_46 - DENDRO_42*DENDRO_63; const double DENDRO_146 = DENDRO_145*DENDRO_25; const double DENDRO_147 = DENDRO_30*(DENDRO_136 + DENDRO_138 - 1.0*DENDRO_140 - DENDRO_142 - DENDRO_144 - DENDRO_146); const double DENDRO_148 = grad_0_beta0[pp]; const double DENDRO_149 = DENDRO_1*DENDRO_104; const double DENDRO_150 = DENDRO_101*DENDRO_15; const 
double DENDRO_151 = DENDRO_9*DENDRO_95; const double DENDRO_152 = DENDRO_20*DENDRO_68; const double DENDRO_153 = DENDRO_22*DENDRO_56; const double DENDRO_154 = DENDRO_25*DENDRO_47; const double DENDRO_155 = DENDRO_30*(DENDRO_149 + DENDRO_150 - 1.0*DENDRO_151 - DENDRO_152 - DENDRO_153 - DENDRO_154); const double DENDRO_156 = grad_1_beta1[pp]; const double DENDRO_157 = grad_2_beta2[pp]; const double DENDRO_158 = (2.0L/3.0L)*DENDRO_148 + (2.0L/3.0L)*DENDRO_156 + (2.0L/3.0L)*DENDRO_157; const double DENDRO_159 = (1.0L/3.0L)*DENDRO_1*DENDRO_7; const double DENDRO_160 = (7.0L/3.0L)*DENDRO_1*DENDRO_7; const double DENDRO_161 = (1.0L/3.0L)*DENDRO_22*DENDRO_7; const double DENDRO_162 = -DENDRO_105 - DENDRO_106 + DENDRO_107 - DENDRO_108 + DENDRO_109 + DENDRO_110 + DENDRO_111 - DENDRO_112 + DENDRO_113; const double DENDRO_163 = -DENDRO_79 - DENDRO_80 + DENDRO_81 - DENDRO_83 + DENDRO_84 + DENDRO_85 - DENDRO_86 + DENDRO_87 + DENDRO_89; const double DENDRO_164 = 2.0*DENDRO_48*alpha[pp]; const double DENDRO_165 = 4*gt0[pp]*gt4[pp] - 4*gt1[pp]*gt2[pp]; const double DENDRO_166 = DENDRO_30*(-1.0*DENDRO_120 - 1.0*DENDRO_122 + DENDRO_124 + DENDRO_127 + DENDRO_131 + DENDRO_133); const double DENDRO_167 = (1.0L/3.0L)*DENDRO_20*DENDRO_7; // Dendro: printing variables Gt_rhs0[pp] = 2*DENDRO_1*DENDRO_7*grad2_1_2_beta0[pp] - DENDRO_10*DENDRO_8 + DENDRO_101*DENDRO_102 + DENDRO_104*DENDRO_115 - DENDRO_11*DENDRO_12 - DENDRO_12*DENDRO_13 - DENDRO_134*grad_1_beta0[pp] + DENDRO_14*DENDRO_16 - DENDRO_147*grad_2_beta0[pp] - DENDRO_148*DENDRO_155 + DENDRO_155*DENDRO_158 + DENDRO_17*DENDRO_18 + DENDRO_18*DENDRO_19 - DENDRO_21*grad2_2_2_beta0[pp] - DENDRO_23*grad2_1_1_beta0[pp] - 4.0L/3.0L*DENDRO_24*DENDRO_26 - DENDRO_27*DENDRO_28 - DENDRO_28*DENDRO_29 - DENDRO_31*DENDRO_37 - DENDRO_38*(DENDRO_116*DENDRO_117 + DENDRO_118*DENDRO_90) - DENDRO_38*(-4*DENDRO_25*DENDRO_39 + DENDRO_37*DENDRO_41) - DENDRO_38*(DENDRO_77*DENDRO_99 - DENDRO_97*DENDRO_98) + DENDRO_47*DENDRO_49 + DENDRO_56*DENDRO_61 + DENDRO_68*DENDRO_71 - DENDRO_72*DENDRO_77 - DENDRO_78*DENDRO_90 + DENDRO_95*DENDRO_96 + beta0[pp]*agrad_0_Gt0[pp] + beta1[pp]*agrad_1_Gt0[pp] + beta2[pp]*agrad_2_Gt0[pp]; Gt_rhs1[pp] = DENDRO_11*DENDRO_160 - DENDRO_119*DENDRO_162*DENDRO_164 - DENDRO_121*DENDRO_163*DENDRO_164 + DENDRO_123*DENDRO_96 + DENDRO_126*DENDRO_71 + DENDRO_13*DENDRO_159 + DENDRO_132*DENDRO_49 - DENDRO_14*DENDRO_161 + DENDRO_156*DENDRO_166 - DENDRO_158*DENDRO_166 + DENDRO_159*DENDRO_8 + DENDRO_16*DENDRO_27 - DENDRO_161*DENDRO_19 + DENDRO_162*DENDRO_72 + DENDRO_163*DENDRO_31 - 4.0L/3.0L*DENDRO_17*DENDRO_23 + DENDRO_18*DENDRO_24 + DENDRO_18*DENDRO_29 - DENDRO_21*grad2_2_2_beta1[pp] - DENDRO_26*grad2_0_0_beta1[pp] + DENDRO_30*(-1.0*DENDRO_136 - 1.0*DENDRO_138 + DENDRO_140 + DENDRO_142 + DENDRO_144 + DENDRO_146)*grad_2_beta1[pp] + DENDRO_30*(-1.0*DENDRO_149 - 1.0*DENDRO_150 + DENDRO_151 + DENDRO_152 + DENDRO_153 + DENDRO_154)*grad_0_beta1[pp] + DENDRO_38*(4*DENDRO_116*DENDRO_22 - DENDRO_118*DENDRO_60) - DENDRO_38*(DENDRO_117*DENDRO_39 - DENDRO_163*DENDRO_41) - DENDRO_38*(-DENDRO_162*DENDRO_99 + DENDRO_165*DENDRO_97) - DENDRO_60*DENDRO_78 - DENDRO_61*(DENDRO_128 - DENDRO_129 + DENDRO_130) - 2*DENDRO_7*DENDRO_9*grad2_0_2_beta1[pp] + beta0[pp]*agrad_0_Gt1[pp] + beta1[pp]*agrad_1_Gt1[pp] + beta2[pp]*agrad_2_Gt1[pp]; Gt_rhs2[pp] = -DENDRO_10*DENDRO_29 + DENDRO_102*DENDRO_137 - DENDRO_11*DENDRO_167 - DENDRO_114*DENDRO_78 + DENDRO_115*DENDRO_135 - DENDRO_12*DENDRO_24 - DENDRO_12*DENDRO_27 - 4.0L/3.0L*DENDRO_13*DENDRO_21 - DENDRO_134*grad_1_beta2[pp] + DENDRO_139*DENDRO_96 + 
DENDRO_14*DENDRO_159 + DENDRO_141*DENDRO_71 + DENDRO_143*DENDRO_61 + DENDRO_145*DENDRO_49 - DENDRO_147*DENDRO_157 + DENDRO_147*DENDRO_158 + 2*DENDRO_15*DENDRO_7*grad2_0_1_beta2[pp] - DENDRO_155*grad_0_beta2[pp] + DENDRO_159*DENDRO_17 + DENDRO_160*DENDRO_19 - DENDRO_167*DENDRO_8 - DENDRO_23*grad2_1_1_beta2[pp] - DENDRO_26*grad2_0_0_beta2[pp] - DENDRO_31*DENDRO_77 - DENDRO_38*(DENDRO_114*DENDRO_118 + DENDRO_116*DENDRO_165) - DENDRO_38*(-4*DENDRO_20*DENDRO_97 + DENDRO_70*DENDRO_99) - DENDRO_38*(-DENDRO_39*DENDRO_98 + DENDRO_41*DENDRO_77) - DENDRO_70*DENDRO_72 + beta0[pp]*agrad_0_Gt2[pp] + beta1[pp]*agrad_1_Gt2[pp] + beta2[pp]*agrad_2_Gt2[pp]; // Dendro: reduced ops: 732 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(Gt_rhs0, &__unzipOutVar[cuda::VAR::U_GT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(Gt_rhs2, &__unzipOutVar[cuda::VAR::U_GT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(Gt_rhs1, &__unzipOutVar[cuda::VAR::U_GT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_Gt_rhs /**@brief compute B_rhs @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __compute_B_rhs(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // generated code for B_rhs begin /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* 
hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={3,3,3}; //input vars begin double * gt1 = __sm_base + 0; double * beta1 = __sm_base + 27; double * gt3 = __sm_base + 54; double * At1 = __sm_base + 81; double * gt5 = __sm_base + 108; double * gt4 = __sm_base + 135; double * alpha = __sm_base + 162; double * gt2 = __sm_base + 189; double * At3 = __sm_base + 216; double * beta2 = __sm_base + 243; double * B2 = __sm_base + 270; double * At4 = __sm_base + 297; double * At0 = __sm_base + 324; double * At2 = __sm_base + 351; double * beta0 = __sm_base + 378; double * gt0 = __sm_base + 405; double * chi = __sm_base + 432; double * B1 = __sm_base + 459; double * B0 = __sm_base + 486; double * At5 = __sm_base + 513; //input vars end // staged vars begin // staged vars end // deriv vars begin double * grad_0_gt5 = __sm_base + 540; double * grad_1_gt0 = __sm_base + 567; double * agrad_0_B1 = __sm_base + 594; double * grad2_1_2_beta2 = __sm_base + 621; double * grad_2_K = __sm_base + 648; double * grad_0_beta0 = __sm_base + 675; double * grad_2_gt0 = __sm_base + 702; double * grad_0_gt4 = __sm_base + 729; double * grad2_1_1_beta1 = __sm_base + 756; double * grad_2_gt3 = __sm_base + 783; double * grad2_0_1_beta0 = __sm_base + 810; double * grad2_0_0_beta1 = __sm_base + 837; double * grad_1_K = __sm_base + 864; double * grad_1_alpha = __sm_base + 891; double * grad2_0_2_beta1 = __sm_base + 918; double * grad2_0_1_beta1 = __sm_base + 945; double * grad2_2_2_beta2 = __sm_base + 972; double * grad_1_beta1 = __sm_base + 999; double * grad_2_alpha = __sm_base + 1026; double * grad_2_beta0 = __sm_base + 1053; double * grad_0_gt1 = __sm_base + 1080; double * grad_1_gt5 = __sm_base + 1107; double * agrad_2_B0 = __sm_base + 1134; double * agrad_2_Gt1 = __sm_base + 1161; double * agrad_1_B1 = __sm_base + 1188; double * agrad_0_Gt2 = __sm_base + 1215; double * grad2_1_1_beta2 = __sm_base + 1242; double * grad_1_gt4 = __sm_base + 1269; double * grad2_2_2_beta1 = __sm_base + 1296; double * grad_1_gt1 = __sm_base + 1323; double * agrad_2_B1 = __sm_base + 1350; double * grad_2_gt4 = __sm_base + 1377; double * grad_0_beta1 = __sm_base + 1404; double * agrad_1_B0 = __sm_base + 1431; double * grad_0_alpha = __sm_base + 1458; double * agrad_0_B0 = __sm_base + 1485; double * grad_0_chi = __sm_base + 1512; double * grad_2_beta2 = __sm_base + 1539; double * grad2_1_2_beta0 = __sm_base + 1566; double * grad2_1_1_beta0 = __sm_base + 1593; double * agrad_0_Gt1 = __sm_base + 1620; double * grad_0_K = __sm_base + 1647; double * agrad_1_B2 = __sm_base + 1674; double * grad2_0_2_beta2 = __sm_base + 1701; double * agrad_2_B2 = __sm_base + 1728; double * agrad_0_Gt0 = __sm_base + 1755; double * agrad_1_Gt1 = __sm_base + 1782; double * grad2_0_0_beta0 = __sm_base + 1809; double * agrad_1_Gt2 = __sm_base + 1836; double * agrad_1_Gt0 = __sm_base + 1863; double * grad_2_gt2 = __sm_base + 1890; double * grad_1_chi = __sm_base + 1917; double * grad_0_gt0 = __sm_base + 1944; double * agrad_0_B2 = __sm_base + 1971; double * grad_0_gt3 = __sm_base + 1998; double * grad2_2_2_beta0 = __sm_base + 2025; double * agrad_2_Gt2 = __sm_base + 2052; double * grad_2_beta1 = __sm_base + 2079; double * grad_2_gt5 = __sm_base + 2106; double * grad_1_beta0 = __sm_base + 2133; double * grad2_0_1_beta2 = __sm_base + 2160; double * grad_1_gt3 = __sm_base + 2187; double 
* grad2_0_2_beta0 = __sm_base + 2214; double * grad_2_chi = __sm_base + 2241; double * grad_2_gt1 = __sm_base + 2268; double * agrad_2_Gt0 = __sm_base + 2295; double * grad_0_gt2 = __sm_base + 2322; double * grad_0_beta2 = __sm_base + 2349; double * grad_1_beta2 = __sm_base + 2376; double * grad2_0_0_beta2 = __sm_base + 2403; double * grad_1_gt2 = __sm_base + 2430; double * grad2_1_2_beta1 = __sm_base + 2457; // deriv vars end // output vars begin double * B_rhs1 = __sm_base + 2484; double * B_rhs0 = __sm_base + 2511; double * B_rhs2 = __sm_base + 2538; // output vars end const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); //load data from global to shared memory cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT1][offset],(double *) gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA1][offset],(double *) beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT3][offset],(double *) gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT1][offset],(double *) At1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT5][offset],(double *) gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT4][offset],(double *) gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_ALPHA][offset],(double *) alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT2][offset],(double *) gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT3][offset],(double *) At3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA2][offset],(double *) beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B2][offset],(double *) B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT4][offset],(double *) At4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT0][offset],(double *) At0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT2][offset],(double *) At2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_BETA0][offset],(double *) beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMGT0][offset],(double *) gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_CHI][offset],(double *) chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B1][offset],(double *) B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_B0][offset],(double *) B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipInVar[cuda::VAR::U_SYMAT5][offset],(double *) At5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) 
grad_2_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
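            // Staging note: each precomputed derivative array is read from its per-stream, per-SM
            // slot in the derivative workspace (offset = stream_id*__szPerStream + SM_ID*__maxBlkSz)
            // and copied tile-by-tile into the shared-memory buffers declared above.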
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt1,(const unsigned int *) ijk_lm,(const 
unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt4,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_alpha,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_K,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double 
*) agrad_1_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_1_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_0_B2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
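            // The agrad_* arrays staged here are the advective derivatives of Gt and B; they feed the
            // beta^i * agrad_i terms in the B_rhs expressions evaluated after the barrier below.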
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_2_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_2_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt5,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt3,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_2_beta0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_chi,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_2_gt1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__agrad_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) agrad_2_Gt0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_0_beta2,(const unsigned int *) 
ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_0_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_0_0_beta2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad_1_gt2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__grad2_1_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) grad2_1_2_beta1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ double x,y,z,r_coord,eta; unsigned int pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ z = ptmin[2] + (k+ijk_lm[4])*dz; y = ptmin[1] + (threadIdx.y+ijk_lm[2])*dy; x = ptmin[0] + (threadIdx.x+ijk_lm[0])*dx; r_coord = sqrt(x*x + y*y + z*z); eta=ETA_CONST; if (r_coord >= ETA_R0) { eta *= pow( (ETA_R0/r_coord), ETA_DAMPING_EXP); } // Dendro: {{{ // Dendro: original ops: 17226 // Dendro: printing temp variables const double DENDRO_0 = beta0[pp]*agrad_0_Gt0[pp] + beta1[pp]*agrad_1_Gt0[pp] + beta2[pp]*agrad_2_Gt0[pp]; const double DENDRO_1 = 2*gt0[pp]*gt4[pp] - 2*gt1[pp]*gt2[pp]; const double DENDRO_2 = pow(gt4[pp], 2); const double DENDRO_3 = pow(gt1[pp], 2); const double DENDRO_4 = pow(gt2[pp], 2); const double DENDRO_5 = gt0[pp]*gt3[pp]; const double DENDRO_6 = gt1[pp]*gt2[pp]; const double DENDRO_7 = DENDRO_2*gt0[pp] + DENDRO_3*gt5[pp] + DENDRO_4*gt3[pp] - DENDRO_5*gt5[pp] - 2*DENDRO_6*gt4[pp]; const double DENDRO_8 = 1.0/DENDRO_7; const double DENDRO_9 = grad2_0_2_beta0[pp]; const double DENDRO_10 = gt1[pp]*gt4[pp] - gt2[pp]*gt3[pp]; const double DENDRO_11 = (7.0L/3.0L)*DENDRO_10*DENDRO_8; const double DENDRO_12 = grad2_1_2_beta1[pp]; const double DENDRO_13 = (1.0L/3.0L)*DENDRO_10*DENDRO_8; const double DENDRO_14 = grad2_2_2_beta2[pp]; const double DENDRO_15 = grad2_0_1_beta0[pp]; const double DENDRO_16 = gt1[pp]*gt5[pp] - gt2[pp]*gt4[pp]; const double DENDRO_17 = (7.0L/3.0L)*DENDRO_16*DENDRO_8; const double DENDRO_18 = grad2_1_1_beta1[pp]; const double DENDRO_19 = (1.0L/3.0L)*DENDRO_16*DENDRO_8; const double DENDRO_20 = grad2_1_2_beta2[pp]; const double DENDRO_21 = -DENDRO_3 + DENDRO_5; const double DENDRO_22 = DENDRO_21*DENDRO_8; const double DENDRO_23 = -DENDRO_4 + gt0[pp]*gt5[pp]; const double DENDRO_24 = DENDRO_23*DENDRO_8; const double DENDRO_25 = grad2_0_0_beta0[pp]; const double DENDRO_26 = -DENDRO_2 + gt3[pp]*gt5[pp]; const double DENDRO_27 = DENDRO_26*DENDRO_8; const double DENDRO_28 = grad2_0_1_beta1[pp]; const double DENDRO_29 = (1.0L/3.0L)*DENDRO_26*DENDRO_8; const double DENDRO_30 = grad2_0_2_beta2[pp]; const double DENDRO_31 = pow(DENDRO_7, -2); const double DENDRO_32 = 2*DENDRO_31*grad_0_alpha[pp]; const double 
DENDRO_33 = pow(DENDRO_16, 2); const double DENDRO_34 = pow(DENDRO_10, 2); const double DENDRO_35 = 2*gt1[pp]*gt5[pp] - 2*gt2[pp]*gt4[pp]; const double DENDRO_36 = 2*gt1[pp]*gt4[pp] - 2*gt2[pp]*gt3[pp]; const double DENDRO_37 = At0[pp]*pow(DENDRO_26, 2) - At1[pp]*DENDRO_26*DENDRO_35 + At2[pp]*DENDRO_26*DENDRO_36 + At3[pp]*DENDRO_33 - At4[pp]*DENDRO_10*DENDRO_35 + At5[pp]*DENDRO_34; const double DENDRO_38 = grad_2_chi[pp]; const double DENDRO_39 = grad_1_chi[pp]; const double DENDRO_40 = grad_0_chi[pp]; const double DENDRO_41 = 2*DENDRO_38; const double DENDRO_42 = -DENDRO_6 + gt0[pp]*gt4[pp]; const double DENDRO_43 = R0*sqrt(DENDRO_8*(-DENDRO_10*DENDRO_40*DENDRO_41 - DENDRO_21*pow(DENDRO_38, 2) - DENDRO_23*pow(DENDRO_39, 2) - DENDRO_26*pow(DENDRO_40, 2) + DENDRO_35*DENDRO_39*DENDRO_40 + DENDRO_39*DENDRO_41*DENDRO_42))*pow(-pow(chi[pp], eta_power[0]) + 1, -eta_power[1]); const double DENDRO_44 = (1.0L/3.0L)*DENDRO_8*alpha[pp]; const double DENDRO_45 = grad_0_K[pp]; const double DENDRO_46 = 1.0/chi[pp]; const double DENDRO_47 = 9*DENDRO_40*DENDRO_46*DENDRO_8; const double DENDRO_48 = grad_0_gt0[pp]; const double DENDRO_49 = grad_1_gt0[pp]; const double DENDRO_50 = -0.5*DENDRO_49 + 1.0*grad_0_gt1[pp]; const double DENDRO_51 = grad_2_gt0[pp]; const double DENDRO_52 = -0.5*DENDRO_51 + 1.0*grad_0_gt2[pp]; const double DENDRO_53 = -DENDRO_10*DENDRO_52 + DENDRO_16*DENDRO_50 - 0.5*DENDRO_26*DENDRO_48; const double DENDRO_54 = pow(DENDRO_7, -3); const double DENDRO_55 = 2*DENDRO_37*DENDRO_54*alpha[pp]; const double DENDRO_56 = grad_1_gt3[pp]; const double DENDRO_57 = 0.5*gt1[pp]*gt5[pp] - 0.5*gt2[pp]*gt4[pp]; const double DENDRO_58 = grad_2_gt3[pp]; const double DENDRO_59 = -0.5*DENDRO_58 + 1.0*grad_1_gt4[pp]; const double DENDRO_60 = grad_0_gt3[pp]; const double DENDRO_61 = 0.5*DENDRO_60 - 1.0*grad_1_gt1[pp]; const double DENDRO_62 = -DENDRO_10*DENDRO_59 + DENDRO_26*DENDRO_61 + DENDRO_56*DENDRO_57; const double DENDRO_63 = pow(DENDRO_42, 2); const double DENDRO_64 = At1[pp]*DENDRO_23; const double DENDRO_65 = At0[pp]*DENDRO_33 + At2[pp]*DENDRO_35*DENDRO_42 + At3[pp]*pow(DENDRO_23, 2) - At4[pp]*DENDRO_1*DENDRO_23 + At5[pp]*DENDRO_63 - DENDRO_35*DENDRO_64; const double DENDRO_66 = 2*DENDRO_54*DENDRO_65*alpha[pp]; const double DENDRO_67 = grad_2_gt5[pp]; const double DENDRO_68 = 0.5*gt1[pp]*gt4[pp] - 0.5*gt2[pp]*gt3[pp]; const double DENDRO_69 = grad_1_gt5[pp]; const double DENDRO_70 = 0.5*DENDRO_69 - 1.0*grad_2_gt4[pp]; const double DENDRO_71 = grad_0_gt5[pp]; const double DENDRO_72 = 0.5*DENDRO_71 - 1.0*grad_2_gt2[pp]; const double DENDRO_73 = -DENDRO_16*DENDRO_70 + DENDRO_26*DENDRO_72 - DENDRO_67*DENDRO_68; const double DENDRO_74 = DENDRO_10*DENDRO_42; const double DENDRO_75 = At2[pp]*DENDRO_21; const double DENDRO_76 = At4[pp]*DENDRO_21; const double DENDRO_77 = At0[pp]*DENDRO_34 - 2*At1[pp]*DENDRO_74 + At3[pp]*DENDRO_63 + At5[pp]*pow(DENDRO_21, 2) - DENDRO_1*DENDRO_76 + DENDRO_36*DENDRO_75; const double DENDRO_78 = 2*DENDRO_54*DENDRO_77*alpha[pp]; const double DENDRO_79 = 2*DENDRO_31*grad_2_alpha[pp]; const double DENDRO_80 = DENDRO_16*DENDRO_42; const double DENDRO_81 = At0[pp]*DENDRO_26; const double DENDRO_82 = DENDRO_10*DENDRO_16; const double DENDRO_83 = At5[pp]*DENDRO_21; const double DENDRO_84 = DENDRO_26*DENDRO_42; const double DENDRO_85 = DENDRO_16*DENDRO_21; const double DENDRO_86 = -At1[pp]*DENDRO_82 - At1[pp]*DENDRO_84 + At2[pp]*DENDRO_34 + At3[pp]*DENDRO_80 - At4[pp]*DENDRO_74 - At4[pp]*DENDRO_85 + DENDRO_10*DENDRO_81 + DENDRO_10*DENDRO_83 + DENDRO_26*DENDRO_75; const double 
DENDRO_87 = 2*DENDRO_31*grad_1_alpha[pp]; const double DENDRO_88 = DENDRO_10*DENDRO_23; const double DENDRO_89 = At3[pp]*DENDRO_23; const double DENDRO_90 = At1[pp]*DENDRO_33 - At2[pp]*DENDRO_82 - At2[pp]*DENDRO_84 + At4[pp]*DENDRO_80 + At4[pp]*DENDRO_88 - At5[pp]*DENDRO_74 - DENDRO_16*DENDRO_81 - DENDRO_16*DENDRO_89 + DENDRO_26*DENDRO_64; const double DENDRO_91 = grad_0_gt4[pp]; const double DENDRO_92 = grad_2_gt1[pp]; const double DENDRO_93 = grad_1_gt2[pp]; const double DENDRO_94 = DENDRO_91 + DENDRO_92 - DENDRO_93; const double DENDRO_95 = -DENDRO_10*DENDRO_71 + DENDRO_16*DENDRO_94 - DENDRO_26*DENDRO_51; const double DENDRO_96 = 2.0*DENDRO_54*DENDRO_86*alpha[pp]; const double DENDRO_97 = grad_2_K[pp]; const double DENDRO_98 = 4*gt1[pp]*gt4[pp] - 4*gt2[pp]*gt3[pp]; const double DENDRO_99 = 9*DENDRO_38*DENDRO_46*DENDRO_8; const double DENDRO_100 = DENDRO_91 - DENDRO_92 + DENDRO_93; const double DENDRO_101 = -DENDRO_10*DENDRO_100 + DENDRO_16*DENDRO_60 - DENDRO_26*DENDRO_49; const double DENDRO_102 = 2.0*DENDRO_54*DENDRO_90*alpha[pp]; const double DENDRO_103 = -DENDRO_91 + DENDRO_92 + DENDRO_93; const double DENDRO_104 = -DENDRO_10*DENDRO_69 - DENDRO_103*DENDRO_26 + DENDRO_16*DENDRO_58; const double DENDRO_105 = -At0[pp]*DENDRO_82 + At1[pp]*DENDRO_80 + At1[pp]*DENDRO_88 - At2[pp]*DENDRO_74 - At2[pp]*DENDRO_85 + At4[pp]*DENDRO_63 + DENDRO_23*DENDRO_76 - DENDRO_42*DENDRO_83 - DENDRO_42*DENDRO_89; const double DENDRO_106 = 2.0*DENDRO_105*DENDRO_54*alpha[pp]; const double DENDRO_107 = grad_1_K[pp]; const double DENDRO_108 = 4*gt1[pp]*gt5[pp] - 4*gt2[pp]*gt4[pp]; const double DENDRO_109 = 9*DENDRO_39*DENDRO_46*DENDRO_8; const double DENDRO_110 = DENDRO_103*DENDRO_16 - DENDRO_23*DENDRO_58 + DENDRO_42*DENDRO_69; const double DENDRO_111 = DENDRO_100*DENDRO_42 + DENDRO_16*DENDRO_49 - DENDRO_23*DENDRO_60; const double DENDRO_112 = 1.0*gt1[pp]*gt4[pp] - 1.0*gt2[pp]*gt3[pp]; const double DENDRO_113 = DENDRO_16*DENDRO_51 - DENDRO_23*DENDRO_94 + DENDRO_42*DENDRO_71; const double DENDRO_114 = 0.5*gt0[pp]*gt4[pp] - 0.5*gt1[pp]*gt2[pp]; const double DENDRO_115 = DENDRO_114*DENDRO_67 - DENDRO_16*DENDRO_72 + DENDRO_23*DENDRO_70; const double DENDRO_116 = -DENDRO_16*DENDRO_61 - 0.5*DENDRO_23*DENDRO_56 + DENDRO_42*DENDRO_59; const double DENDRO_117 = -DENDRO_23*DENDRO_50 + DENDRO_42*DENDRO_52 + DENDRO_48*DENDRO_57; const double DENDRO_118 = DENDRO_31*(DENDRO_110*DENDRO_42 + DENDRO_111*DENDRO_16 - DENDRO_112*DENDRO_113 - DENDRO_115*DENDRO_21 - DENDRO_116*DENDRO_23 - DENDRO_117*DENDRO_26); const double DENDRO_119 = -DENDRO_10*DENDRO_103 - DENDRO_21*DENDRO_69 + DENDRO_42*DENDRO_58; const double DENDRO_120 = -DENDRO_10*DENDRO_49 - DENDRO_100*DENDRO_21 + DENDRO_42*DENDRO_60; const double DENDRO_121 = -DENDRO_10*DENDRO_51 - DENDRO_21*DENDRO_71 + DENDRO_42*DENDRO_94; const double DENDRO_122 = DENDRO_10*DENDRO_72 - 0.5*DENDRO_21*DENDRO_67 - DENDRO_42*DENDRO_70; const double DENDRO_123 = DENDRO_10*DENDRO_61 + DENDRO_114*DENDRO_56 - DENDRO_21*DENDRO_59; const double DENDRO_124 = -DENDRO_21*DENDRO_52 + DENDRO_42*DENDRO_50 - DENDRO_48*DENDRO_68; const double DENDRO_125 = DENDRO_31*(-DENDRO_112*DENDRO_121 + DENDRO_119*DENDRO_42 + DENDRO_120*DENDRO_16 - DENDRO_122*DENDRO_21 - DENDRO_123*DENDRO_23 - DENDRO_124*DENDRO_26); const double DENDRO_126 = grad_0_beta0[pp]; const double DENDRO_127 = DENDRO_31*(DENDRO_101*DENDRO_16 + DENDRO_104*DENDRO_42 - DENDRO_112*DENDRO_95 - DENDRO_21*DENDRO_73 - DENDRO_23*DENDRO_62 - DENDRO_26*DENDRO_53); const double DENDRO_128 = grad_1_beta1[pp]; const double DENDRO_129 = grad_2_beta2[pp]; 
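            // DENDRO_126, DENDRO_128 and DENDRO_129 are the diagonal shift gradients grad_0_beta0,
            // grad_1_beta1 and grad_2_beta2; DENDRO_130 below combines them into the
            // (2/3)*(div beta) factor used in the B_rhs expressions.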
const double DENDRO_130 = (2.0L/3.0L)*DENDRO_126 + (2.0L/3.0L)*DENDRO_128 + (2.0L/3.0L)*DENDRO_129; const double DENDRO_131 = beta0[pp]*agrad_0_Gt1[pp] + beta1[pp]*agrad_1_Gt1[pp] + beta2[pp]*agrad_2_Gt1[pp]; const double DENDRO_132 = (1.0L/3.0L)*DENDRO_42*DENDRO_8; const double DENDRO_133 = (7.0L/3.0L)*DENDRO_42*DENDRO_8; const double DENDRO_134 = (1.0L/3.0L)*DENDRO_23*DENDRO_8; const double DENDRO_135 = 4*gt0[pp]*gt4[pp] - 4*gt1[pp]*gt2[pp]; const double DENDRO_136 = beta0[pp]*agrad_0_Gt2[pp] + beta1[pp]*agrad_1_Gt2[pp] + beta2[pp]*agrad_2_Gt2[pp]; const double DENDRO_137 = (1.0L/3.0L)*DENDRO_21*DENDRO_8; // Dendro: printing variables B_rhs0[pp] = -B0[pp]*DENDRO_43 - DENDRO_0*lambda[3] + DENDRO_0 + DENDRO_1*DENDRO_8*grad2_1_2_beta0[pp] + DENDRO_101*DENDRO_102 + DENDRO_104*DENDRO_106 - DENDRO_11*DENDRO_9 - DENDRO_118*grad_1_beta0[pp] - DENDRO_12*DENDRO_13 - DENDRO_125*grad_2_beta0[pp] - DENDRO_126*DENDRO_127 + DENDRO_127*DENDRO_130 - DENDRO_13*DENDRO_14 + DENDRO_15*DENDRO_17 + DENDRO_18*DENDRO_19 + DENDRO_19*DENDRO_20 - DENDRO_22*grad2_2_2_beta0[pp] - DENDRO_24*grad2_1_1_beta0[pp] - 4.0L/3.0L*DENDRO_25*DENDRO_27 - DENDRO_28*DENDRO_29 - DENDRO_29*DENDRO_30 - DENDRO_32*DENDRO_37 - DENDRO_44*(DENDRO_107*DENDRO_108 + DENDRO_109*DENDRO_90) - DENDRO_44*(-4*DENDRO_26*DENDRO_45 + DENDRO_37*DENDRO_47) - DENDRO_44*(DENDRO_86*DENDRO_99 - DENDRO_97*DENDRO_98) + DENDRO_53*DENDRO_55 + DENDRO_62*DENDRO_66 + DENDRO_73*DENDRO_78 - DENDRO_79*DENDRO_86 - DENDRO_87*DENDRO_90 + DENDRO_95*DENDRO_96 + lambda[2]*(beta0[pp]*agrad_0_B0[pp] + beta1[pp]*agrad_1_B0[pp] + beta2[pp]*agrad_2_B0[pp]); B_rhs1[pp] = -B1[pp]*DENDRO_43 + DENDRO_102*DENDRO_111 - DENDRO_105*DENDRO_79 + DENDRO_106*DENDRO_110 + DENDRO_113*DENDRO_96 + DENDRO_115*DENDRO_78 + DENDRO_116*DENDRO_66 + DENDRO_117*DENDRO_55 - DENDRO_118*DENDRO_128 + DENDRO_118*DENDRO_130 + DENDRO_12*DENDRO_133 - DENDRO_125*grad_2_beta1[pp] - DENDRO_127*grad_0_beta1[pp] - DENDRO_131*lambda[3] + DENDRO_131 + DENDRO_132*DENDRO_14 + DENDRO_132*DENDRO_9 - DENDRO_134*DENDRO_15 - DENDRO_134*DENDRO_20 + DENDRO_17*DENDRO_28 - 4.0L/3.0L*DENDRO_18*DENDRO_24 + DENDRO_19*DENDRO_25 + DENDRO_19*DENDRO_30 - DENDRO_22*grad2_2_2_beta1[pp] - DENDRO_27*grad2_0_0_beta1[pp] - DENDRO_32*DENDRO_90 - DENDRO_36*DENDRO_8*grad2_0_2_beta1[pp] - DENDRO_44*(DENDRO_105*DENDRO_99 + DENDRO_135*DENDRO_97) - DENDRO_44*(-4*DENDRO_107*DENDRO_23 + DENDRO_109*DENDRO_65) - DENDRO_44*(DENDRO_108*DENDRO_45 + DENDRO_47*DENDRO_90) - DENDRO_65*DENDRO_87 + lambda[2]*(beta0[pp]*agrad_0_B1[pp] + beta1[pp]*agrad_1_B1[pp] + beta2[pp]*agrad_2_B1[pp]); B_rhs2[pp] = -B2[pp]*DENDRO_43 + DENDRO_102*DENDRO_120 - DENDRO_105*DENDRO_87 + DENDRO_106*DENDRO_119 - DENDRO_11*DENDRO_30 - DENDRO_118*grad_1_beta2[pp] - DENDRO_12*DENDRO_137 + DENDRO_121*DENDRO_96 + DENDRO_122*DENDRO_78 + DENDRO_123*DENDRO_66 + DENDRO_124*DENDRO_55 - DENDRO_125*DENDRO_129 + DENDRO_125*DENDRO_130 - DENDRO_127*grad_0_beta2[pp] - DENDRO_13*DENDRO_25 - DENDRO_13*DENDRO_28 + DENDRO_132*DENDRO_15 + DENDRO_132*DENDRO_18 + DENDRO_133*DENDRO_20 - DENDRO_136*lambda[3] + DENDRO_136 - DENDRO_137*DENDRO_9 - 4.0L/3.0L*DENDRO_14*DENDRO_22 - DENDRO_24*grad2_1_1_beta2[pp] - DENDRO_27*grad2_0_0_beta2[pp] - DENDRO_32*DENDRO_86 + DENDRO_35*DENDRO_8*grad2_0_1_beta2[pp] - DENDRO_44*(DENDRO_105*DENDRO_109 + DENDRO_107*DENDRO_135) - DENDRO_44*(-4*DENDRO_21*DENDRO_97 + DENDRO_77*DENDRO_99) - DENDRO_44*(-DENDRO_45*DENDRO_98 + DENDRO_47*DENDRO_86) - DENDRO_77*DENDRO_79 + lambda[2]*(beta0[pp]*agrad_0_B2[pp] + beta1[pp]*agrad_1_B2[pp] + beta2[pp]*agrad_2_B2[pp]); // Dendro: reduced 
ops: 765 // Dendro: }}} } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(B_rhs1, &__unzipOutVar[cuda::VAR::U_B1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(B_rhs0, &__unzipOutVar[cuda::VAR::U_B0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); cuda::__storeSharedToGlobal3D<double>(B_rhs2, &__unzipOutVar[cuda::VAR::U_B2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); } // end of block assigned to gpu block loop x } // end of block assigned to gpu block loop y } // end of block assigned to gpu block loop z } // end of function__compute_B_rhs /**@brief apply KO dissipation @param[in] __unzipInVar: unzipped input array (global memory) @param[in] MemoryDerivs: allocated workspace for derivative computations @param[in] __dendroBlkList: dendro block list @param[in] __gpuBlockMap: gpu block map @param[in] __deviceProperties: cuda device properties @param[out] __unzipOutVar: unzipped output computed rhs */ __device__ void __ko_dissipation(double **__unzipOutVar, const double**__unzipInVar,MemoryDerivs* __derivWorkspace, const cuda::_Block* dblock, const unsigned int * __gpuBlockMap,const cuda::BSSNComputeParams * __bssnParams,const cudaDeviceProp* __deviceProperties, double* __sm_base, unsigned int stream_id){ // bssn compute parameters const double lambda[4]={__bssnParams->BSSN_LAMBDA[0],__bssnParams->BSSN_LAMBDA[1],__bssnParams->BSSN_LAMBDA[2],__bssnParams->BSSN_LAMBDA[3]}; const double lambda_f[2]={__bssnParams->BSSN_LAMBDA_F[0],__bssnParams->BSSN_LAMBDA_F[1]}; const double kosigma=__bssnParams->KO_DISS_SIGMA; const double ETA_R0=__bssnParams->ETA_R0; const double R0=__bssnParams->ETA_R0; const double ETA_DAMPING=__bssnParams->ETA_DAMPING; const double ETA_DAMPING_EXP=__bssnParams->ETA_DAMPING_EXP; const double ETA_CONST=__bssnParams->ETA_CONST; const double eta_power[2]={__bssnParams->BSSN_ETA_POWER[0],__bssnParams->BSSN_ETA_POWER[1]}; const unsigned int NUM_SM_UNITS=__deviceProperties->multiProcessorCount; const unsigned int SM_ID=get_smid();//blockIdx.x%NUM_SM_UNITS; const unsigned int offset=dblock->getOffset(); const unsigned int *sz=dblock->getSz(); const unsigned int *alignedSz=dblock->getAlignedSz(); const double* hx=dblock->getDx(); const double dx=hx[0]; const double dy=hx[1]; const double dz=hx[2]; const double* ptmin=dblock->getPtMin(); const double* ptmax=dblock->getPtMax(); const unsigned int bflag=dblock->getBFlag(); const unsigned int tile_sz[3]={10,10,10}; double * kograd_0 = __sm_base + 0; double * kograd_1 = __sm_base + 1000; double * kograd_2 = __sm_base + 2000; double * unZipSharedOut = __sm_base + 3000; const unsigned int Lb = 3;// load begin bound const unsigned int Le = sz[0]-3;// load end bound //!! Note that we assume tile size are cubic. const unsigned int BLK_ITERATIONS_X = ((Le-Lb)<tile_sz[0])? 
1: ((int)ceil((double)(Le-Lb-tile_sz[0])/(tile_sz[0]-2*0)))+1; const unsigned int BLK_ITERATIONS_Y = BLK_ITERATIONS_X; const unsigned int BLK_ITERATIONS_Z = BLK_ITERATIONS_X; unsigned int ijk_lm[3*2]; unsigned int tile_lm[3*2]; for(unsigned int iter_z=0;iter_z<BLK_ITERATIONS_Z;iter_z++){ ijk_lm[2*2+0]=max(3,(int)(3 + tile_sz[2]*iter_z -2*iter_z*0)); ijk_lm[2*2+1]=min(ijk_lm[2*2+0]+tile_sz[2]-1,sz[2]-3-1); for(unsigned int iter_y=0;iter_y<BLK_ITERATIONS_Y;iter_y++){ ijk_lm[2*1+0]=max(3,(int)(3 + tile_sz[1]*iter_y -2*iter_y*0)); ijk_lm[2*1+1]=min(ijk_lm[2*1+0]+tile_sz[1]-1,sz[1]-3-1); for(unsigned int iter_x=0;iter_x<BLK_ITERATIONS_X;iter_x++){ ijk_lm[2*0+0]=max(3,(int)(3 + tile_sz[0]*iter_x -2*iter_x*0)); ijk_lm[2*0+1]=min(ijk_lm[2*0+0]+tile_sz[0]-1,sz[0]-3-1); tile_lm[0]=0; tile_lm[1]=ijk_lm[1] - ijk_lm[0]; tile_lm[2]=0; tile_lm[3]=ijk_lm[3] - ijk_lm[2]; tile_lm[4]=0; tile_lm[5]=ijk_lm[5] - ijk_lm[4]; //if(threadIdx.x ==0 && threadIdx.y==0 && threadIdx.z==0) //printf(" iter %d %d %d : threadid (%d,%d,%d) tile begin: (%d,%d,%d) tile end: (%d,%d,%d) \n",iter_x,iter_y,iter_z, threadIdx.x,threadIdx.y,threadIdx.z,ijk_lm[0],ijk_lm[2],ijk_lm[4],ijk_lm[1],ijk_lm[3],ijk_lm[5]); unsigned int pp; //ko dissipation for variable alpha cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_alpha[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_ALPHA][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_ALPHA][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
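            // (beta0 KO block continues) Every variable below repeats the same pattern: stage the three
            // KO-dissipation derivatives and the current RHS tile, accumulate
            //     unZipSharedOut[pp] += kosigma * (kograd_0[pp] + kograd_1[pp] + kograd_2[pp]);
            // and store the tile back to the corresponding unzipped output variable.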
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable beta2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
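            // (beta2 KO block continues) As in the blocks above, only threads whose (x,y) indices fall
            // inside the tile extent perform the accumulation; all threads still reach the
            // __syncthreads() barriers, which sit outside that guard.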
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_beta2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_BETA2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_BETA2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt3 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt4 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable gt5 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_gt5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMGT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable chi cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_chi[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_CHI][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_CHI][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At0[(stream_id*(__derivWorkspace->__szPerStream) + 
SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) 
alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At3 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At3[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT3][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At4 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At4[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); 
cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT4][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable At5 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_At5[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_SYMAT5][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable K cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_K[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_K][offset],(double *) unZipSharedOut,(const unsigned 
int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_K][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); 
if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable Gt2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_Gt2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_GT2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_GT2][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B0 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B0[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B0][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ 
pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B0][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B1 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B1[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B1][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){ unZipSharedOut[pp] += kosigma * (kograd_0[pp] +kograd_1[pp] + kograd_2[pp]); } //loop z end }// end of the if for the thread idx __syncthreads(); // sotre computed variables cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B1][offset],(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz, (const unsigned int *) tile_lm,(const unsigned int *) tile_sz); __syncthreads(); //ko dissipation for variable B2 cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_0_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_0,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_1_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_1,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&(__derivWorkspace->__kograd_2_B2[(stream_id*(__derivWorkspace->__szPerStream) + SM_ID*(__derivWorkspace->__maxBlkSz))]),(double *) kograd_2,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); cuda::__loadGlobalToShared3D<double>(&__unzipOutVar[cuda::VAR::U_B2][offset],(double *) unZipSharedOut,(const unsigned int *) ijk_lm,(const unsigned int *) alignedSz,(const unsigned int *) tile_sz); __syncthreads(); if(!((threadIdx.x>(ijk_lm[1]-ijk_lm[0])) || (threadIdx.y>(ijk_lm[3]-ijk_lm[2]))) ){ pp=0*tile_sz[0]*tile_sz[1]+threadIdx.y*tile_sz[1]+threadIdx.x; for(unsigned int 
k=0;k<=(ijk_lm[5]-ijk_lm[4]);++k,pp+=tile_sz[0]*tile_sz[1]){
                        unZipSharedOut[pp] += kosigma * (kograd_0[pp] + kograd_1[pp] + kograd_2[pp]);
                    } //loop z end
                }// end of the if for the thread idx

                __syncthreads();
                // store computed variables
                cuda::__storeSharedToGlobal3D<double>(unZipSharedOut, &__unzipOutVar[cuda::VAR::U_B2][offset],
                    (const unsigned int *) ijk_lm, (const unsigned int *) alignedSz,
                    (const unsigned int *) tile_lm, (const unsigned int *) tile_sz);

                __syncthreads();

            } // end of block assigned to gpu block loop x
        } // end of block assigned to gpu block loop y
    } // end of block assigned to gpu block loop z

}// end of function __ko_dissipation

}// end of namespace cuda
c1ca6ebcdaffb1d33653158f85f7f511057a8113.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#define BLOCK_SIZE 3

#define WA 3
#define HA 3
#define WB 3
#define HB 3
#define WC 3
#define HC 3

void Init(float * data ,int size)
{
    for(int i = 0; i < size; ++i)
        data[i] = i;
}

__global__ void matrixMul(float* A,float* B,float* C,int wA,int wB)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    float value = 0;
    for(int i = 0; i < wA; ++i)
    {
        float elementA = A[ty * wA + i];
        float elementB = B[i * wB + tx];
        value += elementA * elementB;
    }

    // write to device mem
    C[ty * wA + tx] = value;
}

int main(int argc ,char** argv)
{
    srand(2006);

    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);

    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float * h_B = (float*) malloc(mem_size_B);

    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float * h_C = (float *) malloc(mem_size_C);

    Init(h_A, size_A);
    Init(h_B, size_B);

    printf("\n\nMatrix A\n");
    for(int i = 0; i < size_A; i++)
    {
        printf("%f ", h_A[i]);
        if(((i + 1) % WA) == 0)
            printf("\n");
    }

    printf("\n\nMatrix B\n");
    for(int i = 0; i < size_B; i++)
    {
        printf ("%f ", h_B[i]);
        if(((i + 1) % WB) == 0)
            printf("\n");
    }

    float* d_A;
    float* d_B;
    float* d_C;
    hipMalloc((void**) &d_A, mem_size_A);
    hipMalloc((void**) &d_B, mem_size_B);
    hipMalloc((void**) &d_C, mem_size_C);

    hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice);
    hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
    dim3 grid(WC / threads.x, HC / threads.y);

    hipLaunchKernelGGL(( matrixMul), dim3(grid) , dim3(threads) , 0, 0, d_A, d_B, d_C, WA, WB);

    hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost);

    printf("\n\nMatrix C (Results) \n");
    for(int i = 0; i < size_C; i++){
        printf("%f ", h_C[i]);
        if(((i + 1) % WC) == 0)
            printf("\n");
    }
    printf("\n");

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
c1ca6ebcdaffb1d33653158f85f7f511057a8113.cu
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#define BLOCK_SIZE 3

#define WA 3
#define HA 3
#define WB 3
#define HB 3
#define WC 3
#define HC 3

void Init(float * data ,int size)
{
    for(int i = 0; i < size; ++i)
        data[i] = i;
}

__global__ void matrixMul(float* A,float* B,float* C,int wA,int wB)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    float value = 0;
    for(int i = 0; i < wA; ++i)
    {
        float elementA = A[ty * wA + i];
        float elementB = B[i * wB + tx];
        value += elementA * elementB;
    }

    // write to device mem
    C[ty * wA + tx] = value;
}

int main(int argc ,char** argv)
{
    srand(2006);

    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);

    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float * h_B = (float*) malloc(mem_size_B);

    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float * h_C = (float *) malloc(mem_size_C);

    Init(h_A, size_A);
    Init(h_B, size_B);

    printf("\n\nMatrix A\n");
    for(int i = 0; i < size_A; i++)
    {
        printf("%f ", h_A[i]);
        if(((i + 1) % WA) == 0)
            printf("\n");
    }

    printf("\n\nMatrix B\n");
    for(int i = 0; i < size_B; i++)
    {
        printf ("%f ", h_B[i]);
        if(((i + 1) % WB) == 0)
            printf("\n");
    }

    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc((void**) &d_A, mem_size_A);
    cudaMalloc((void**) &d_B, mem_size_B);
    cudaMalloc((void**) &d_C, mem_size_C);

    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
    dim3 grid(WC / threads.x, HC / threads.y);

    matrixMul<<< grid , threads >>>(d_A, d_B, d_C, WA, WB);

    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    printf("\n\nMatrix C (Results) \n");
    for(int i = 0; i < size_C; i++){
        printf("%f ", h_C[i]);
        if(((i + 1) % WC) == 0)
            printf("\n");
    }
    printf("\n");

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
06c6e7997d4becc98cfcc4616b74090a019d217a.hip
// !!! This is a file automatically generated by hipify!!!
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
#include "tnn/utils/data_type_utils.h"

namespace TNN_NS {

DECLARE_CUDA_ACC(Flatten, LAYER_FLATTEN);

Status CudaFlattenLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
        const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}

Status CudaFlattenLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    return TNN_OK;
}

Status CudaFlattenLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    int count = DimsVectorUtils::Count(outputs[0]->GetBlobDesc().dims);
    auto input_dims = inputs[0]->GetBlobDesc().dims;
    auto data_byte_size = DataTypeUtils::GetBytesSize(outputs[0]->GetBlobDesc().data_type);
    auto size_in_bytes = DimsVectorUtils::Count(input_dims) * data_byte_size;
    hipMemcpyAsync(outputs[0]->GetHandle().base, inputs[0]->GetHandle().base, size_in_bytes,
        hipMemcpyDeviceToDevice, context_->GetStream());
    return TNN_OK;
}

REGISTER_CUDA_ACC(Flatten, LAYER_FLATTEN);

}  // namespace TNN_NS
06c6e7997d4becc98cfcc4616b74090a019d217a.cu
// Tencent is pleased to support the open source community by making TNN available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "tnn/device/cuda/acc/cuda_layer_acc.h"
#include "tnn/utils/dims_utils.h"
#include "tnn/utils/data_type_utils.h"

namespace TNN_NS {

DECLARE_CUDA_ACC(Flatten, LAYER_FLATTEN);

Status CudaFlattenLayerAcc::Init(Context *context, LayerParam *param, LayerResource *resource,
        const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    return CudaLayerAcc::Init(context, param, resource, inputs, outputs);
}

Status CudaFlattenLayerAcc::Reshape(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    return TNN_OK;
}

Status CudaFlattenLayerAcc::Forward(const std::vector<Blob *> &inputs, const std::vector<Blob *> &outputs) {
    int count = DimsVectorUtils::Count(outputs[0]->GetBlobDesc().dims);
    auto input_dims = inputs[0]->GetBlobDesc().dims;
    auto data_byte_size = DataTypeUtils::GetBytesSize(outputs[0]->GetBlobDesc().data_type);
    auto size_in_bytes = DimsVectorUtils::Count(input_dims) * data_byte_size;
    cudaMemcpyAsync(outputs[0]->GetHandle().base, inputs[0]->GetHandle().base, size_in_bytes,
        cudaMemcpyDeviceToDevice, context_->GetStream());
    return TNN_OK;
}

REGISTER_CUDA_ACC(Flatten, LAYER_FLATTEN);

}  // namespace TNN_NS
15b5f4cbf2ccd6dd04a178098daecdb16efb0720.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>

#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>

#include <raft/handle.hpp>
#include <raft/random/rng.cuh>
#include <rmm/device_uvector.hpp>

#include <raft/cudart_utils.h>

#include "benchmark.cuh"

namespace ML {
namespace Bench {
namespace Arima {

struct ArimaParams {
  TimeSeriesParams data;
  ARIMAOrder order;
};

template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
 public:
  ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
    : TsFixtureRandom<DataT>(name, p.data),
      order(p.order),
      param(0, rmm::cuda_stream_default),
      loglike(0, rmm::cuda_stream_default),
      temp_mem(0, rmm::cuda_stream_default)
  {
  }

  // Note: public function because of the __device__ lambda
  void runBenchmark(::benchmark::State& state) override
  {
    using MLCommon::Bench::CudaEventTimer;

    auto& handle = *this->handle;
    auto stream = handle.get_stream();
    auto counting = thrust::make_counting_iterator(0);

    // Generate random parameters
    int N = order.complexity();
    raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
    gpu_gen.uniform(param.data(), N * this->params.batch_size, -1.0, 1.0, stream);
    // Set sigma2 parameters to 1.0
    DataT* x = param.data();  // copy the object attribute for thrust
    thrust::for_each(thrust::hip::par.on(stream),
                     counting,
                     counting + this->params.batch_size,
                     [=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });

    CUDA_CHECK(hipStreamSynchronize(stream));

    // Benchmark loop
    this->loopOnState(state, [this]() {
      ARIMAMemory<double> arima_mem(
        order, this->params.batch_size, this->params.n_obs, temp_mem.data());

      // Evaluate log-likelihood
      batched_loglike(*this->handle,
                      arima_mem,
                      this->data.X.data(),
                      this->params.batch_size,
                      this->params.n_obs,
                      order,
                      param.data(),
                      loglike.data(),
                      true,
                      false);
    });
  }

  void allocateBuffers(const ::benchmark::State& state)
  {
    Fixture::allocateBuffers(state);

    auto& handle = *this->handle;
    auto stream = handle.get_stream();

    // Buffer for the model parameters
    param.resize(order.complexity() * this->params.batch_size, stream);

    // Buffers for the log-likelihood
    loglike.resize(this->params.batch_size, stream);

    // Temporary memory
    size_t temp_buf_size =
      ARIMAMemory<double>::compute_size(order, this->params.batch_size, this->params.n_obs);
    temp_mem.resize(temp_buf_size, stream);
  }

  void deallocateBuffers(const ::benchmark::State& state) { Fixture::deallocateBuffers(state); }

 protected:
  ARIMAOrder order;
  rmm::device_uvector<DataT> param;
  rmm::device_uvector<DataT> loglike;
  rmm::device_uvector<char> temp_mem;
};

std::vector<ArimaParams> getInputs()
{
  struct std::vector<ArimaParams> out;
  ArimaParams p;
  p.data.seed = 12345ULL;
  std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
                                        {1, 1, 1, 1, 1, 1, 4, 0},
                                        {1, 1, 1, 1, 1, 1, 12, 0},
                                        {1, 1, 1, 1, 1, 1, 24, 0},
                                        {1, 1, 1, 1, 1, 1, 52, 0}};
  std::vector<int> list_batch_size = {10, 100, 1000, 10000};
  std::vector<int> list_n_obs = {200, 500, 1000};
  for (auto& order : list_order) {
    for (auto& batch_size : list_batch_size) {
      for (auto& n_obs : list_n_obs) {
        p.order = order;
        p.data.batch_size = batch_size;
        p.data.n_obs = n_obs;
        out.push_back(p);
      }
    }
  }
  return out;
}

ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs());

}  // namespace Arima
}  // namespace Bench
}  // namespace ML
15b5f4cbf2ccd6dd04a178098daecdb16efb0720.cu
/*
 * Copyright (c) 2020-2021, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>

#include <cuml/tsa/arima_common.h>
#include <cuml/tsa/batched_arima.hpp>

#include <raft/handle.hpp>
#include <raft/random/rng.cuh>
#include <rmm/device_uvector.hpp>

#include <raft/cudart_utils.h>

#include "benchmark.cuh"

namespace ML {
namespace Bench {
namespace Arima {

struct ArimaParams {
  TimeSeriesParams data;
  ARIMAOrder order;
};

template <typename DataT>
class ArimaLoglikelihood : public TsFixtureRandom<DataT> {
 public:
  ArimaLoglikelihood(const std::string& name, const ArimaParams& p)
    : TsFixtureRandom<DataT>(name, p.data),
      order(p.order),
      param(0, rmm::cuda_stream_default),
      loglike(0, rmm::cuda_stream_default),
      temp_mem(0, rmm::cuda_stream_default)
  {
  }

  // Note: public function because of the __device__ lambda
  void runBenchmark(::benchmark::State& state) override
  {
    using MLCommon::Bench::CudaEventTimer;

    auto& handle = *this->handle;
    auto stream = handle.get_stream();
    auto counting = thrust::make_counting_iterator(0);

    // Generate random parameters
    int N = order.complexity();
    raft::random::Rng gpu_gen(this->params.seed, raft::random::GenPhilox);
    gpu_gen.uniform(param.data(), N * this->params.batch_size, -1.0, 1.0, stream);
    // Set sigma2 parameters to 1.0
    DataT* x = param.data();  // copy the object attribute for thrust
    thrust::for_each(thrust::cuda::par.on(stream),
                     counting,
                     counting + this->params.batch_size,
                     [=] __device__(int bid) { x[(bid + 1) * N - 1] = 1.0; });

    CUDA_CHECK(cudaStreamSynchronize(stream));

    // Benchmark loop
    this->loopOnState(state, [this]() {
      ARIMAMemory<double> arima_mem(
        order, this->params.batch_size, this->params.n_obs, temp_mem.data());

      // Evaluate log-likelihood
      batched_loglike(*this->handle,
                      arima_mem,
                      this->data.X.data(),
                      this->params.batch_size,
                      this->params.n_obs,
                      order,
                      param.data(),
                      loglike.data(),
                      true,
                      false);
    });
  }

  void allocateBuffers(const ::benchmark::State& state)
  {
    Fixture::allocateBuffers(state);

    auto& handle = *this->handle;
    auto stream = handle.get_stream();

    // Buffer for the model parameters
    param.resize(order.complexity() * this->params.batch_size, stream);

    // Buffers for the log-likelihood
    loglike.resize(this->params.batch_size, stream);

    // Temporary memory
    size_t temp_buf_size =
      ARIMAMemory<double>::compute_size(order, this->params.batch_size, this->params.n_obs);
    temp_mem.resize(temp_buf_size, stream);
  }

  void deallocateBuffers(const ::benchmark::State& state) { Fixture::deallocateBuffers(state); }

 protected:
  ARIMAOrder order;
  rmm::device_uvector<DataT> param;
  rmm::device_uvector<DataT> loglike;
  rmm::device_uvector<char> temp_mem;
};

std::vector<ArimaParams> getInputs()
{
  struct std::vector<ArimaParams> out;
  ArimaParams p;
  p.data.seed = 12345ULL;
  std::vector<ARIMAOrder> list_order = {{1, 1, 1, 0, 0, 0, 0, 0},
                                        {1, 1, 1, 1, 1, 1, 4, 0},
                                        {1, 1, 1, 1, 1, 1, 12, 0},
                                        {1, 1, 1, 1, 1, 1, 24, 0},
                                        {1, 1, 1, 1, 1, 1, 52, 0}};
  std::vector<int> list_batch_size = {10, 100, 1000, 10000};
  std::vector<int> list_n_obs = {200, 500, 1000};
  for (auto& order : list_order) {
    for (auto& batch_size : list_batch_size) {
      for (auto& n_obs : list_n_obs) {
        p.order = order;
        p.data.batch_size = batch_size;
        p.data.n_obs = n_obs;
        out.push_back(p);
      }
    }
  }
  return out;
}

ML_BENCH_REGISTER(ArimaParams, ArimaLoglikelihood<double>, "arima", getInputs());

}  // namespace Arima
}  // namespace Bench
}  // namespace ML