Columns:
  hip_filename  : string, lengths 5 to 84
  hip_content   : string, lengths 79 to 9.69M
  cuda_filename : string, lengths 4 to 83
  cuda_content  : string, lengths 19 to 9.69M
914cf08c77fbb9ad531ce01a7e78392d3eed0554.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>

#include <memory>

#include "tensors/gpu/cuda_helpers.h"
#include "tensors/tensor_operators.h"
#include "training/dropper.h"
#include "training/sparse_tensor.h"

namespace marian {

__global__ void grad_drop(float* data, float* tmp, float* residual, float* velocity, float cut_off, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;

  bool mask = std::abs(data[idx]) > cut_off;
  residual[idx] = data[idx] * !mask;  // store residual
  if(velocity)
    velocity[idx] = velocity[idx] * !mask;  // momentum factor masking
  data[idx] = data[idx] * mask;  // send
  tmp[idx] = 1 * mask;
}

__global__ void grad_add_error(float* data, float* residual, float* velocity, float m, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;

  // momentum correction
  if(velocity) {
    velocity[idx] = m * velocity[idx] + data[idx];
    data[idx] = velocity[idx] + residual[idx];
  } else {
    data[idx] = data[idx] + residual[idx];
  }
}

__global__ void full_abs(float* data, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;
  data[idx] = abs(data[idx]);
}

__global__ void buildIndices(float* denseData, float* denseSum, float* sparseData, int* sparseIndices, int denseSize) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= denseSize)
    return;

  int t_id = round(denseSum[idx]);
  if(t_id <= 0) {
    return;
  }

  if(idx == 0 && t_id > 0) {
    sparseIndices[t_id - 1] = idx;
    sparseData[t_id - 1] = denseData[idx];
  } else if(idx > 0 && t_id > round(denseSum[idx - 1])) {
    sparseIndices[t_id - 1] = idx;
    sparseData[t_id - 1] = denseData[idx];
  }
}

__global__ void randomSampling(float* originalData, float* data, int size, int scale, int fullSize) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= size)
    return;
  data[idx] = abs(originalData[idx * scale]);
}

void GradientDropBase::grad_drop_do(float* grads, float* residual, float* velocity, float* tmp, int len, float rate, float m) {
  int threads = 512;
  int blocks = 1 + len / threads;
  hipSetDevice(_deviceId.no);

  hipLaunchKernelGGL(( grad_add_error), dim3(blocks), dim3(threads), 0, 0, grads, residual, velocity, m, len);

  // full sort
  // int sortSize = len;
  int sortSize = min(100000, len);
  int blocksSample = 1 + sortSize / threads;
  hipLaunchKernelGGL(( randomSampling), dim3(blocksSample), dim3(threads), 0, 0, grads, tmp, sortSize, len / sortSize, len);

  thrust::device_ptr<float> dev_data_ptr(tmp);
  thrust::sort(dev_data_ptr, dev_data_ptr + sortSize);

  int cut_index = ::max(0, (int)(sortSize * rate) - 1);
  hipMemcpy(&cut_off, tmp + cut_index, sizeof(float), hipMemcpyDeviceToHost);

  hipLaunchKernelGGL(( grad_drop), dim3(blocks), dim3(threads), 0, 0, grads, tmp, residual, velocity, cut_off, len);
}

void GradientDropBase::dropGraph(Tensor t, SparseTensor destination, double rate, double momentum) {
  hipSetDevice(t->getDevice().no);

  if(!residual) {
    _deviceId = t->getDevice();
    CUDA_CHECK(hipMalloc(&residual, sizeof(float) * t->size()));
    CUDA_CHECK(hipMalloc(&temp_d, sizeof(float) * t->size()));
    hipMemset(residual, 0, sizeof(float) * t->size());
    hipMemset(temp_d, 0, sizeof(float) * t->size());
    step = 0;
  }

  if(!velocity && momentum > 0.0) {
    CUDA_CHECK(hipMalloc(&velocity, sizeof(float) * t->size()));
    hipMemset(velocity, 0, sizeof(float) * t->size());
  }

  // drop the gradients in t->data(). Also fills in feedback with the
  // propagated error. Fills temp_d with a binary flag: 0 means that gradient in
  // that position is dropped, 1 otherwise
  grad_drop_do(t->data(), residual, velocity, temp_d, t->size(), rate, momentum);

  // do inclusive sum on temp_d, to obtain the sparse matrix location of
  // non-dropped gradients
  thrust::device_ptr<float> mask_ptr(temp_d);
  int denseSize = t->size();
  thrust::inclusive_scan(mask_ptr, mask_ptr + denseSize, mask_ptr);
  float sparseSize;
  hipMemcpy(&sparseSize, temp_d + denseSize - 1, sizeof(float), hipMemcpyDeviceToHost);

  int threads = 512;
  int blocks = 1 + denseSize / threads;
  hipSetDevice(t->getDevice().no);
  // std::cout<<sparseSize<<" / "<<destination->capacity()<<std::endl;
  hipLaunchKernelGGL(( buildIndices), dim3(blocks), dim3(threads), 0, 0, t->data(), temp_d, destination->data(), destination->indices(), denseSize);
  destination->setSize(sparseSize);

  hipStreamSynchronize(0);
  step++;
}
}
914cf08c77fbb9ad531ce01a7e78392d3eed0554.cu
#include <curand.h>
#include <curand_kernel.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>

#include <memory>

#include "tensors/gpu/cuda_helpers.h"
#include "tensors/tensor_operators.h"
#include "training/dropper.h"
#include "training/sparse_tensor.h"

namespace marian {

__global__ void grad_drop(float* data, float* tmp, float* residual, float* velocity, float cut_off, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;

  bool mask = std::abs(data[idx]) > cut_off;
  residual[idx] = data[idx] * !mask;  // store residual
  if(velocity)
    velocity[idx] = velocity[idx] * !mask;  // momentum factor masking
  data[idx] = data[idx] * mask;  // send
  tmp[idx] = 1 * mask;
}

__global__ void grad_add_error(float* data, float* residual, float* velocity, float m, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;

  // momentum correction
  if(velocity) {
    velocity[idx] = m * velocity[idx] + data[idx];
    data[idx] = velocity[idx] + residual[idx];
  } else {
    data[idx] = data[idx] + residual[idx];
  }
}

__global__ void full_abs(float* data, int max_size) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= max_size)
    return;
  data[idx] = abs(data[idx]);
}

__global__ void buildIndices(float* denseData, float* denseSum, float* sparseData, int* sparseIndices, int denseSize) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= denseSize)
    return;

  int t_id = round(denseSum[idx]);
  if(t_id <= 0) {
    return;
  }

  if(idx == 0 && t_id > 0) {
    sparseIndices[t_id - 1] = idx;
    sparseData[t_id - 1] = denseData[idx];
  } else if(idx > 0 && t_id > round(denseSum[idx - 1])) {
    sparseIndices[t_id - 1] = idx;
    sparseData[t_id - 1] = denseData[idx];
  }
}

__global__ void randomSampling(float* originalData, float* data, int size, int scale, int fullSize) {
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if(idx >= size)
    return;
  data[idx] = abs(originalData[idx * scale]);
}

void GradientDropBase::grad_drop_do(float* grads, float* residual, float* velocity, float* tmp, int len, float rate, float m) {
  int threads = 512;
  int blocks = 1 + len / threads;
  cudaSetDevice(_deviceId.no);

  grad_add_error<<<blocks, threads>>>(grads, residual, velocity, m, len);

  // full sort
  // int sortSize = len;
  int sortSize = min(100000, len);
  int blocksSample = 1 + sortSize / threads;
  randomSampling<<<blocksSample, threads>>>(grads, tmp, sortSize, len / sortSize, len);

  thrust::device_ptr<float> dev_data_ptr(tmp);
  thrust::sort(dev_data_ptr, dev_data_ptr + sortSize);

  int cut_index = std::max(0, (int)(sortSize * rate) - 1);
  cudaMemcpy(&cut_off, tmp + cut_index, sizeof(float), cudaMemcpyDeviceToHost);

  grad_drop<<<blocks, threads>>>(grads, tmp, residual, velocity, cut_off, len);
}

void GradientDropBase::dropGraph(Tensor t, SparseTensor destination, double rate, double momentum) {
  cudaSetDevice(t->getDevice().no);

  if(!residual) {
    _deviceId = t->getDevice();
    CUDA_CHECK(cudaMalloc(&residual, sizeof(float) * t->size()));
    CUDA_CHECK(cudaMalloc(&temp_d, sizeof(float) * t->size()));
    cudaMemset(residual, 0, sizeof(float) * t->size());
    cudaMemset(temp_d, 0, sizeof(float) * t->size());
    step = 0;
  }

  if(!velocity && momentum > 0.0) {
    CUDA_CHECK(cudaMalloc(&velocity, sizeof(float) * t->size()));
    cudaMemset(velocity, 0, sizeof(float) * t->size());
  }

  // drop the gradients in t->data(). Also fills in feedback with the
  // propagated error. Fills temp_d with a binary flag: 0 means that gradient in
  // that position is dropped, 1 otherwise
  grad_drop_do(t->data(), residual, velocity, temp_d, t->size(), rate, momentum);

  // do inclusive sum on temp_d, to obtain the sparse matrix location of
  // non-dropped gradients
  thrust::device_ptr<float> mask_ptr(temp_d);
  int denseSize = t->size();
  thrust::inclusive_scan(mask_ptr, mask_ptr + denseSize, mask_ptr);
  float sparseSize;
  cudaMemcpy(&sparseSize, temp_d + denseSize - 1, sizeof(float), cudaMemcpyDeviceToHost);

  int threads = 512;
  int blocks = 1 + denseSize / threads;
  cudaSetDevice(t->getDevice().no);
  // std::cout<<sparseSize<<" / "<<destination->capacity()<<std::endl;
  buildIndices<<<blocks, threads>>>(t->data(), temp_d, destination->data(), destination->indices(), denseSize);
  destination->setSize(sparseSize);

  cudaStreamSynchronize(0);
  step++;
}
}
8a10ad2392959e18268cc472ad81eb71af100af8.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "../common/book.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <math.h>

#define N 3

/* Defining kernel function matrix multiplication which takes 3 arguments A,B,C.
   These are matrices of 3*3.
   A & B are input matrices and matrix C is the product of A & B */
__global__ void matrixMult(float *A, float *B, float *C)
{
    float Sum_Matrix = 0;
    int row = threadIdx.y + blockDim.y * blockIdx.y; // row represents the indices of matrix A
    int col = threadIdx.x + blockDim.x * blockIdx.x; // col represents the indices of matrix B
    //printf("ThreadIdx.x : %d\tblockDim.x : %d\tblockIdx.x : %d\tThreadIdx.y : %d\tblockDim.y : %d\tblockIdx.y :%d\t\n", threadIdx.x, blockDim.x, blockIdx.x, threadIdx.y, blockDim.y, blockIdx.y);
    int index = row * N + col;
    //printf("Index = %d\n",index);
    if (row < N && col < N)
    {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; ++i)
        {
            Sum_Matrix += A[i + row * N] * B[col + i * N];
        }
    }
    C[index] = Sum_Matrix;
}

int main()
{
    /* Declaring matrix A & B of size N*N */
    float A[N][N], B[N][N], C[N][N];
    int i, j; // Declaring i for row and j for column of matrix
    /* creating three 2D arrays */
    float *dev_a, *dev_b, *dev_c;

    /*-------- Taking user input for matrix A elements ----------*/
    printf("\n Enter elements of first matrix A of size %d * %d\n", N, N);
    for(i = 0; i<N; i++) // i is representing row of matrix A
    {
        for(j = 0; j<N; j++) // j is representing column of matrix A
        {
            printf("Enter the element A[%d][%d] : ", i, j);
            scanf("%f", &A[i][j]);
        }
    }

    /*-------- Taking user input for matrix B elements ----------*/
    printf("\n Enter elements of second matrix B of size %d * %d\n", N, N);
    for(i = 0; i<N; i++) // i is representing row of matrix B
    {
        for(j = 0; j<N; j++) // j is representing column of matrix B
        {
            printf("Enter the element B[%d][%d] : ", i, j);
            scanf("%f", &B[i][j]);
        }
    }

    /*-------- Allocating memory in GPU by using hipMalloc ----------*/
    hipMalloc((void**)&dev_a, (N*N) * sizeof(float));
    hipMalloc((void**)&dev_b, (N*N) * sizeof(float));
    hipMalloc((void**)&dev_c, (N*N) * sizeof(float));

    /*-------- Copying elements of 2D array A, B from host(CPU) to device(GPU) by using hipMemcpy ----------*/
    hipMemcpy(dev_a, A, (N*N) * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_b, B, (N*N) * sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(dev_c, C, (N*N) * sizeof(float), hipMemcpyHostToDevice);

    /*--------- Calling kernel function -------------*/
    dim3 blocksPerGrid(1, 1);   // Number of blocks is 1
    dim3 threadsPerBlock(N, N); // Number of threadsPerBlock is 9 (3*3)
    hipLaunchKernelGGL(( matrixMult), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_c); // Calling kernel function with 1 block and 9 threads per block
    hipDeviceSynchronize(); // synchronizing CPU with GPU

    /*-------- After the GPU kernel function executes it copies the 2D array back from GPU to CPU ----------------*/
    hipMemcpy(C, dev_c, (N*N) * sizeof(float), hipMemcpyDeviceToHost);

    /*----------------- printing the product of two matrix A & B --------------------*/
    printf("\n");
    printf("Product of two matrix A and B is :\n\n ");
    for(i = 0; i<N; i++)
    {
        for(j = 0; j<N; j++)
        {
            printf("%.2f\t\t", C[i][j]);
        }
        printf("\n");
    }

    // Free the memory allocated in GPU
    hipFree(dev_a);
    hipFree(dev_b);
    hipFree(dev_c);
    return 0;
}
8a10ad2392959e18268cc472ad81eb71af100af8.cu
#include <stdio.h>
#include <stdlib.h>
#include "../common/book.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

#define N 3

/* Defining kernel function matrix multiplication which takes 3 arguments A,B,C.
   These are matrices of 3*3.
   A & B are input matrices and matrix C is the product of A & B */
__global__ void matrixMult(float *A, float *B, float *C)
{
    float Sum_Matrix = 0;
    int row = threadIdx.y + blockDim.y * blockIdx.y; // row represents the indices of matrix A
    int col = threadIdx.x + blockDim.x * blockIdx.x; // col represents the indices of matrix B
    //printf("ThreadIdx.x : %d\tblockDim.x : %d\tblockIdx.x : %d\tThreadIdx.y : %d\tblockDim.y : %d\tblockIdx.y :%d\t\n", threadIdx.x, blockDim.x, blockIdx.x, threadIdx.y, blockDim.y, blockIdx.y);
    int index = row * N + col;
    //printf("Index = %d\n",index);
    if (row < N && col < N)
    {
        // each thread computes one element of the block sub-matrix
        for (int i = 0; i < N; ++i)
        {
            Sum_Matrix += A[i + row * N] * B[col + i * N];
        }
    }
    C[index] = Sum_Matrix;
}

int main()
{
    /* Declaring matrix A & B of size N*N */
    float A[N][N], B[N][N], C[N][N];
    int i, j; // Declaring i for row and j for column of matrix
    /* creating three 2D arrays */
    float *dev_a, *dev_b, *dev_c;

    /*-------- Taking user input for matrix A elements ----------*/
    printf("\n Enter elements of first matrix A of size %d * %d\n", N, N);
    for(i = 0; i<N; i++) // i is representing row of matrix A
    {
        for(j = 0; j<N; j++) // j is representing column of matrix A
        {
            printf("Enter the element A[%d][%d] : ", i, j);
            scanf("%f", &A[i][j]);
        }
    }

    /*-------- Taking user input for matrix B elements ----------*/
    printf("\n Enter elements of second matrix B of size %d * %d\n", N, N);
    for(i = 0; i<N; i++) // i is representing row of matrix B
    {
        for(j = 0; j<N; j++) // j is representing column of matrix B
        {
            printf("Enter the element B[%d][%d] : ", i, j);
            scanf("%f", &B[i][j]);
        }
    }

    /*-------- Allocating memory in GPU by using cudaMalloc ----------*/
    cudaMalloc((void**)&dev_a, (N*N) * sizeof(float));
    cudaMalloc((void**)&dev_b, (N*N) * sizeof(float));
    cudaMalloc((void**)&dev_c, (N*N) * sizeof(float));

    /*-------- Copying elements of 2D array A, B from host(CPU) to device(GPU) by using cudaMemcpy ----------*/
    cudaMemcpy(dev_a, A, (N*N) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, B, (N*N) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, C, (N*N) * sizeof(float), cudaMemcpyHostToDevice);

    /*--------- Calling kernel function -------------*/
    dim3 blocksPerGrid(1, 1);   // Number of blocks is 1
    dim3 threadsPerBlock(N, N); // Number of threadsPerBlock is 9 (3*3)
    matrixMult<<< blocksPerGrid, threadsPerBlock >>>(dev_a, dev_b, dev_c); // Calling kernel function with 1 block and 9 threads per block
    cudaThreadSynchronize(); // synchronizing CPU with GPU

    /*-------- After the GPU kernel function executes it copies the 2D array back from GPU to CPU ----------------*/
    cudaMemcpy(C, dev_c, (N*N) * sizeof(float), cudaMemcpyDeviceToHost);

    /*----------------- printing the product of two matrix A & B --------------------*/
    printf("\n");
    printf("Product of two matrix A and B is :\n\n ");
    for(i = 0; i<N; i++)
    {
        for(j = 0; j<N; j++)
        {
            printf("%.2f\t\t", C[i][j]);
        }
        printf("\n");
    }

    // Free the memory allocated in GPU
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
502b921bd2fd343ec2f1b2e1aae989421e8bc88f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Brooke Husic and Jared Dunnmon
 * Final project CME 253
 * Due Feb 17 2017
 */

#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>

#include "./debug.h"

#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
  {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
   __FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
  {printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
   __FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif

#define THREADS_PER_BLOCK_X 16
#define THREADS_PER_BLOCK_Y 16
#define THREADS_PER_BLOCK_Z 4

typedef std::chrono::high_resolution_clock Clock;

/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
                  std::vector<int>& prot_atomnums,
                  std::vector<int>& prot_resnums,
                  std::vector<std::vector<double>>& prot_xyz_coords){
  std::ifstream f(protein_inputfile.c_str());
  if (f.is_open()) {
    std::string klass, code, resname, chain;
    int atomnum, resnum;
    double x, y, z, occ, temp;
    while (f >> klass >> atomnum >> code >> resname >> chain
             >> resnum >> x >> y >> z >> occ >> temp){
      std::vector<double> temp_coord;
      temp_coord.push_back(x);
      temp_coord.push_back(y);
      temp_coord.push_back(z);
      prot_atomnums.push_back(atomnum);
      prot_resnums.push_back(resnum);
      prot_xyz_coords.push_back(temp_coord);
    }

    // some checks
    if(prot_atomnums.size() != prot_resnums.size()){
      std::cerr << "ERROR: Problem in protein file" << std::endl;
    }
    if(prot_atomnums.size() != prot_xyz_coords.size()){
      std::cerr << "ERROR: Problem in protein file" << std::endl;
    }
  }
  std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}

/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
                     std::vector<int>& lig_trajnums,
                     std::vector<int>& lig_atomnums,
                     std::vector<int>& lig_resnums,
                     std::vector<std::vector<double>>& lig_xyz_coords){
  std::ifstream f(ligand_inputfile.c_str());
  if (f.is_open()) {
    std::string klass, code, resname, chain;
    int trajnum, atomnum, resnum;
    double x, y, z, occ, temp;
    while (f >> trajnum >> klass >> atomnum >> code >> resname >> chain
             >> resnum >> x >> y >> z >> occ >> temp){
      std::vector<double> temp_coord;
      temp_coord.push_back(x);
      temp_coord.push_back(y);
      temp_coord.push_back(z);
      lig_trajnums.push_back(trajnum);
      lig_atomnums.push_back(atomnum);
      lig_resnums.push_back(resnum);
      lig_xyz_coords.push_back(temp_coord);
    }

    // some checks
    if(lig_atomnums.size() != lig_trajnums.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
    if(lig_atomnums.size() != lig_resnums.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
    if(lig_atomnums.size() != lig_xyz_coords.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
  }
  std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
  std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}

/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
  double dist_squared;
  dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
                 + (v1[1]-v2[1])*(v1[1]-v2[1])
                 + (v1[2]-v2[2])*(v1[2]-v2[2]) };
  return dist_squared;
}

/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
                                        std::vector<std::vector<double>>& prot_xyz_coords,
                                        std::vector<int>& lig_trajnums,
                                        std::vector<std::vector<double>>& lig_xyz_coords){
  std::vector<double> all_distances;
  for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
    for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
      double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii], prot_xyz_coords[jj]);
      temp_dist = sqrt(temp_dist)/10.;
      all_distances.push_back(temp_dist);
    }
  }
  return all_distances;
}

/* cuda contact featurizer */
// __global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
// {
//   int pidx = threadIdx.x + blockIdx.x * blockDim.x;
//   int lidx = threadIdx.y + blockIdx.y * blockDim.y;
//   if ( (pidx < plength[0]) && (lidx< llength[0])){
//     cudists[pidx+plength[0]*lidx] = ( sqrt(
//         (pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
//       + (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
//       + (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
//   }
//   __syncthreads();
// }

/* cuda contact featurizer */
__global__ void cuContactsRED(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
  int pidx = threadIdx.x + blockIdx.x * blockDim.x;
  int lidx = threadIdx.y + blockIdx.y * blockDim.y;
  int task = threadIdx.z + blockIdx.z * blockDim.z;

  __shared__ double temp[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y][THREADS_PER_BLOCK_Z];

  if ( (pidx < plength[0]) && (lidx< llength[0]) ){
    temp[threadIdx.x][threadIdx.y][threadIdx.z] = (pxyz[pidx*3+task%4]-lxyz[lidx*3+task%4])
                                                 *(pxyz[pidx*3+task%4]-lxyz[lidx*3+task%4]);
  }
  __syncthreads();

  if ( (pidx < plength[0]) && (lidx< llength[0]) && threadIdx.z % 4 == 0){
    cudists[pidx+plength[0]*lidx] = sqrt(temp[threadIdx.x][threadIdx.y][threadIdx.z]
                                       + temp[threadIdx.x][threadIdx.y][threadIdx.z]
                                       + temp[threadIdx.x][threadIdx.y][threadIdx.z])/10.;
  }
}

int main(int argc, char *argv[])
{
  if (argc != 3) {
    std::cout << "Usage:" << std::endl;
    {std::cout << " " << argv[0] << " <protein input file> " << " <ligand input file> " << std::endl;}
    return 0;
  }

  std::string protein_inputfile = argv[1];
  std::string ligand_inputfile = argv[2];

  std::vector<int> prot_atomnums;
  std::vector<int> prot_resnums;
  std::vector<std::vector<double>> prot_xyz_coords;

  std::vector<int> lig_trajnums;
  std::vector<int> lig_atomnums;
  std::vector<int> lig_resnums;
  std::vector<std::vector<double>> lig_xyz_coords;

  ProteinSetup(protein_inputfile, prot_atomnums, prot_resnums, prot_xyz_coords);
  LigandTrajSetup(ligand_inputfile, lig_trajnums, lig_atomnums, lig_resnums, lig_xyz_coords);

  auto cpp_start = Clock::now();

  /* compute distances using cpp */
  std::vector<double> distances = LPContactFeaturizer(prot_atomnums, prot_xyz_coords,
                                                      lig_trajnums, lig_xyz_coords);

  auto cpp_end = Clock::now();

  /* print out cpp time stats */
  std::cout << "Number of distances to compute : " << distances.size() << std::endl;
  std::cout << "Cpp distances calculated in "
            << std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
            << " microseconds" << std::endl;

  double *pxyz, *lxyz, *cudists;
  double *d_pxyz, *d_lxyz, *d_cudists;
  int *plength, *d_plength;
  int *llength, *d_llength;

  int protein_size = prot_atomnums.size()*3;
  int ligand_traj_size = lig_trajnums.size()*3;
  int cudists_size = protein_size/3 * ligand_traj_size/3;

  /* get GPU device number and name */
  int dev;
  hipDeviceProp_t deviceProp;
  checkCUDA( hipGetDevice( &dev ) );
  checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
  printf("Using GPU %d: %s\n", dev, deviceProp.name );

  /* allocate space for device copies of a, b, c */
  checkCUDA( hipMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
  checkCUDA( hipMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
  checkCUDA( hipMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
  checkCUDA( hipMalloc( (void **) &d_plength, sizeof(int) ));
  checkCUDA( hipMalloc( (void **) &d_llength, sizeof(int) ));

  /* allocate space for host copies of a, b, c and setup input values */
  pxyz = (double *)malloc( protein_size *sizeof(double));
  lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
  cudists = (double *)malloc( cudists_size *sizeof(double));
  plength = (int *)malloc( sizeof(int));
  llength = (int *)malloc( sizeof(int));

  for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
    pxyz[pp*3] = prot_xyz_coords[pp][0];
    pxyz[pp*3+1] = prot_xyz_coords[pp][1];
    pxyz[pp*3+2] = prot_xyz_coords[pp][2];
  }

  for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
    lxyz[ll*3] = lig_xyz_coords[ll][0];
    lxyz[ll*3+1] = lig_xyz_coords[ll][1];
    lxyz[ll*3+2] = lig_xyz_coords[ll][2];
  }

  plength[0] = prot_atomnums.size();
  llength[0] = lig_trajnums.size();

  /* copy inputs to device */
  checkCUDA( hipMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), hipMemcpyHostToDevice ) );
  checkCUDA( hipMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), hipMemcpyHostToDevice ) );
  checkCUDA( hipMemcpy( d_plength, plength, sizeof(int), hipMemcpyHostToDevice) );
  checkCUDA( hipMemcpy( d_llength, llength, sizeof(int), hipMemcpyHostToDevice) );

  /* zero out the C array */
  checkCUDA( hipMemset( d_cudists, 0, cudists_size*sizeof(double) ) );

  /* setup threadblock size and grid sizes */
  dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, THREADS_PER_BLOCK_Z);
  dim3 blocks(plength[0]/threads.x+1, llength[0]/threads.y+1, threads.z );

  /* check if threads and blocks are OK */
  hipDeviceProp_t prop;
  hipGetDeviceProperties(&prop, 0);
  if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
    printf("Too many threads per block \n");
  }
  if (threads.x > prop.maxThreadsDim[0]) {
    printf("Too many threads in x-direction \n");
  }
  if (threads.y > prop.maxThreadsDim[1]) {
    printf("Too many threads in y-direction \n");
  }
  if (threads.z > prop.maxThreadsDim[2]) {
    printf("Too many threads in z-direction \n");
  }

  printf("Ready to launch kernel\n");

  auto cuda_start = Clock::now();

  /* launch the kernel on the GPU */
  hipLaunchKernelGGL(( cuContactsRED), dim3(blocks), dim3(threads) , 0, 0, d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
  checkKERNEL();

  auto cuda_mid = Clock::now();

  /* print out CUDA time stats */
  // std::cout << "CUDA distances calculated in "
  //           << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
  //           << " microseconds" << std::endl;

  /* copy result back to host */
  checkCUDA( hipMemcpy( cudists, d_cudists, cudists_size*sizeof(double), hipMemcpyDeviceToHost ) );

  auto cuda_end = Clock::now();

  // std::cout << "CUDA distances copied in "
  //           << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
  //           << " microseconds" << std::endl;

  std::cout << "CUDA distances calculated in: "
            << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
            << " microseconds" << std::endl;

  /* print out distance pairs to file */
  std::ofstream f("distances.txt");
  if(f.is_open()){
    for(unsigned int k = 0; k < distances.size(); k++){
      f << distances[k] << " " << cudists[k] << std::endl;
    }
  }
  f.close();

  free(pxyz);
  free(lxyz);
  free(cudists);
  free(plength);

  checkCUDA( hipFree( d_pxyz ) );
  checkCUDA( hipFree( d_lxyz ) );
  checkCUDA( hipFree( d_cudists ) );
  checkCUDA( hipFree( d_plength ) );

  checkCUDA( hipDeviceReset () );

  return 0;
} /* end main */
502b921bd2fd343ec2f1b2e1aae989421e8bc88f.cu
/* Brooke Husic and Jared Dunnmon
 * Final project CME 253
 * Due Feb 17 2017
 */

#include <fstream>
#include <iostream>
#include <math.h>
#include <vector>
#include <chrono>

#include "./debug.h"

#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
  {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
   __FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
  {printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
   __FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif

#define THREADS_PER_BLOCK_X 16
#define THREADS_PER_BLOCK_Y 16
#define THREADS_PER_BLOCK_Z 4

typedef std::chrono::high_resolution_clock Clock;

/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
                  std::vector<int>& prot_atomnums,
                  std::vector<int>& prot_resnums,
                  std::vector<std::vector<double>>& prot_xyz_coords){
  std::ifstream f(protein_inputfile.c_str());
  if (f.is_open()) {
    std::string klass, code, resname, chain;
    int atomnum, resnum;
    double x, y, z, occ, temp;
    while (f >> klass >> atomnum >> code >> resname >> chain
             >> resnum >> x >> y >> z >> occ >> temp){
      std::vector<double> temp_coord;
      temp_coord.push_back(x);
      temp_coord.push_back(y);
      temp_coord.push_back(z);
      prot_atomnums.push_back(atomnum);
      prot_resnums.push_back(resnum);
      prot_xyz_coords.push_back(temp_coord);
    }

    // some checks
    if(prot_atomnums.size() != prot_resnums.size()){
      std::cerr << "ERROR: Problem in protein file" << std::endl;
    }
    if(prot_atomnums.size() != prot_xyz_coords.size()){
      std::cerr << "ERROR: Problem in protein file" << std::endl;
    }
  }
  std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}

/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
                     std::vector<int>& lig_trajnums,
                     std::vector<int>& lig_atomnums,
                     std::vector<int>& lig_resnums,
                     std::vector<std::vector<double>>& lig_xyz_coords){
  std::ifstream f(ligand_inputfile.c_str());
  if (f.is_open()) {
    std::string klass, code, resname, chain;
    int trajnum, atomnum, resnum;
    double x, y, z, occ, temp;
    while (f >> trajnum >> klass >> atomnum >> code >> resname >> chain
             >> resnum >> x >> y >> z >> occ >> temp){
      std::vector<double> temp_coord;
      temp_coord.push_back(x);
      temp_coord.push_back(y);
      temp_coord.push_back(z);
      lig_trajnums.push_back(trajnum);
      lig_atomnums.push_back(atomnum);
      lig_resnums.push_back(resnum);
      lig_xyz_coords.push_back(temp_coord);
    }

    // some checks
    if(lig_atomnums.size() != lig_trajnums.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
    if(lig_atomnums.size() != lig_resnums.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
    if(lig_atomnums.size() != lig_xyz_coords.size()){
      std::cerr << "ERROR: Problem in ligand file" << std::endl;
    }
  }
  std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
  std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}

/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
  double dist_squared;
  dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
                 + (v1[1]-v2[1])*(v1[1]-v2[1])
                 + (v1[2]-v2[2])*(v1[2]-v2[2]) };
  return dist_squared;
}

/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
                                        std::vector<std::vector<double>>& prot_xyz_coords,
                                        std::vector<int>& lig_trajnums,
                                        std::vector<std::vector<double>>& lig_xyz_coords){
  std::vector<double> all_distances;
  for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
    for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
      double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii], prot_xyz_coords[jj]);
      temp_dist = sqrt(temp_dist)/10.;
      all_distances.push_back(temp_dist);
    }
  }
  return all_distances;
}

/* cuda contact featurizer */
// __global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
// {
//   int pidx = threadIdx.x + blockIdx.x * blockDim.x;
//   int lidx = threadIdx.y + blockIdx.y * blockDim.y;
//   if ( (pidx < plength[0]) && (lidx< llength[0])){
//     cudists[pidx+plength[0]*lidx] = ( sqrt(
//         (pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
//       + (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
//       + (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
//   }
//   __syncthreads();
// }

/* cuda contact featurizer */
__global__ void cuContactsRED(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
  int pidx = threadIdx.x + blockIdx.x * blockDim.x;
  int lidx = threadIdx.y + blockIdx.y * blockDim.y;
  int task = threadIdx.z + blockIdx.z * blockDim.z;

  __shared__ double temp[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y][THREADS_PER_BLOCK_Z];

  if ( (pidx < plength[0]) && (lidx< llength[0]) ){
    temp[threadIdx.x][threadIdx.y][threadIdx.z] = (pxyz[pidx*3+task%4]-lxyz[lidx*3+task%4])
                                                 *(pxyz[pidx*3+task%4]-lxyz[lidx*3+task%4]);
  }
  __syncthreads();

  if ( (pidx < plength[0]) && (lidx< llength[0]) && threadIdx.z % 4 == 0){
    cudists[pidx+plength[0]*lidx] = sqrt(temp[threadIdx.x][threadIdx.y][threadIdx.z]
                                       + temp[threadIdx.x][threadIdx.y][threadIdx.z]
                                       + temp[threadIdx.x][threadIdx.y][threadIdx.z])/10.;
  }
}

int main(int argc, char *argv[])
{
  if (argc != 3) {
    std::cout << "Usage:" << std::endl;
    {std::cout << " " << argv[0] << " <protein input file> " << " <ligand input file> " << std::endl;}
    return 0;
  }

  std::string protein_inputfile = argv[1];
  std::string ligand_inputfile = argv[2];

  std::vector<int> prot_atomnums;
  std::vector<int> prot_resnums;
  std::vector<std::vector<double>> prot_xyz_coords;

  std::vector<int> lig_trajnums;
  std::vector<int> lig_atomnums;
  std::vector<int> lig_resnums;
  std::vector<std::vector<double>> lig_xyz_coords;

  ProteinSetup(protein_inputfile, prot_atomnums, prot_resnums, prot_xyz_coords);
  LigandTrajSetup(ligand_inputfile, lig_trajnums, lig_atomnums, lig_resnums, lig_xyz_coords);

  auto cpp_start = Clock::now();

  /* compute distances using cpp */
  std::vector<double> distances = LPContactFeaturizer(prot_atomnums, prot_xyz_coords,
                                                      lig_trajnums, lig_xyz_coords);

  auto cpp_end = Clock::now();

  /* print out cpp time stats */
  std::cout << "Number of distances to compute : " << distances.size() << std::endl;
  std::cout << "Cpp distances calculated in "
            << std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
            << " microseconds" << std::endl;

  double *pxyz, *lxyz, *cudists;
  double *d_pxyz, *d_lxyz, *d_cudists;
  int *plength, *d_plength;
  int *llength, *d_llength;

  int protein_size = prot_atomnums.size()*3;
  int ligand_traj_size = lig_trajnums.size()*3;
  int cudists_size = protein_size/3 * ligand_traj_size/3;

  /* get GPU device number and name */
  int dev;
  cudaDeviceProp deviceProp;
  checkCUDA( cudaGetDevice( &dev ) );
  checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
  printf("Using GPU %d: %s\n", dev, deviceProp.name );

  /* allocate space for device copies of a, b, c */
  checkCUDA( cudaMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
  checkCUDA( cudaMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
  checkCUDA( cudaMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
  checkCUDA( cudaMalloc( (void **) &d_plength, sizeof(int) ));
  checkCUDA( cudaMalloc( (void **) &d_llength, sizeof(int) ));

  /* allocate space for host copies of a, b, c and setup input values */
  pxyz = (double *)malloc( protein_size *sizeof(double));
  lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
  cudists = (double *)malloc( cudists_size *sizeof(double));
  plength = (int *)malloc( sizeof(int));
  llength = (int *)malloc( sizeof(int));

  for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
    pxyz[pp*3] = prot_xyz_coords[pp][0];
    pxyz[pp*3+1] = prot_xyz_coords[pp][1];
    pxyz[pp*3+2] = prot_xyz_coords[pp][2];
  }

  for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
    lxyz[ll*3] = lig_xyz_coords[ll][0];
    lxyz[ll*3+1] = lig_xyz_coords[ll][1];
    lxyz[ll*3+2] = lig_xyz_coords[ll][2];
  }

  plength[0] = prot_atomnums.size();
  llength[0] = lig_trajnums.size();

  /* copy inputs to device */
  checkCUDA( cudaMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), cudaMemcpyHostToDevice ) );
  checkCUDA( cudaMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), cudaMemcpyHostToDevice ) );
  checkCUDA( cudaMemcpy( d_plength, plength, sizeof(int), cudaMemcpyHostToDevice) );
  checkCUDA( cudaMemcpy( d_llength, llength, sizeof(int), cudaMemcpyHostToDevice) );

  /* zero out the C array */
  checkCUDA( cudaMemset( d_cudists, 0, cudists_size*sizeof(double) ) );

  /* setup threadblock size and grid sizes */
  dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, THREADS_PER_BLOCK_Z);
  dim3 blocks(plength[0]/threads.x+1, llength[0]/threads.y+1, threads.z );

  /* check if threads and blocks are OK */
  cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, 0);
  if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
    printf("Too many threads per block \n");
  }
  if (threads.x > prop.maxThreadsDim[0]) {
    printf("Too many threads in x-direction \n");
  }
  if (threads.y > prop.maxThreadsDim[1]) {
    printf("Too many threads in y-direction \n");
  }
  if (threads.z > prop.maxThreadsDim[2]) {
    printf("Too many threads in z-direction \n");
  }

  printf("Ready to launch kernel\n");

  auto cuda_start = Clock::now();

  /* launch the kernel on the GPU */
  cuContactsRED<<< blocks, threads >>>( d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
  checkKERNEL();

  auto cuda_mid = Clock::now();

  /* print out CUDA time stats */
  // std::cout << "CUDA distances calculated in "
  //           << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
  //           << " microseconds" << std::endl;

  /* copy result back to host */
  checkCUDA( cudaMemcpy( cudists, d_cudists, cudists_size*sizeof(double), cudaMemcpyDeviceToHost ) );

  auto cuda_end = Clock::now();

  // std::cout << "CUDA distances copied in "
  //           << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
  //           << " microseconds" << std::endl;

  std::cout << "CUDA distances calculated in: "
            << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
            << " microseconds" << std::endl;

  /* print out distance pairs to file */
  std::ofstream f("distances.txt");
  if(f.is_open()){
    for(unsigned int k = 0; k < distances.size(); k++){
      f << distances[k] << " " << cudists[k] << std::endl;
    }
  }
  f.close();

  free(pxyz);
  free(lxyz);
  free(cudists);
  free(plength);

  checkCUDA( cudaFree( d_pxyz ) );
  checkCUDA( cudaFree( d_lxyz ) );
  checkCUDA( cudaFree( d_cudists ) );
  checkCUDA( cudaFree( d_plength ) );

  checkCUDA( cudaDeviceReset () );

  return 0;
} /* end main */
0b4e5ea68a8012a6264070a0c1282cb0ec58a88a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>

#include "file.h"
#include "common.h"
#include "cuerr.h"
#include "kernels.hip"

static int read_data(float *A0, int nx, int ny, int nz, FILE *fp)
{
  int s = 0;
  for(int i = 0; i < nz; i++) {
    for(int j = 0; j < ny; j++) {
      for(int k = 0; k < nx; k++) {
        fread(A0 + s, sizeof(float), 1, fp);
        s++;
      }
    }
  }
  return 0;
}

int main(int argc, char** argv) {
  struct pb_TimerSet timers;
  struct pb_Parameters *parameters;

  // printf("CUDA accelerated 7 points stencil codes****\n");
  // printf("Original version by Li-Wen Chang <[email protected]> and I-Jui Sung<[email protected]>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);

  pb_InitializeTimerSet(&timers);
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  //declaration
  int nx, ny, nz;
  int size;
  int iteration;
  float c0 = 1.0f/6.0f;
  float c1 = 1.0f/6.0f/6.0f;

  if (argc < 5) {
    printf("Usage: probe nx ny nz tx ty t\n"
           "nx: the grid size x\n"
           "ny: the grid size y\n"
           "nz: the grid size z\n"
           "t: the iteration time\n");
    return -1;
  }

  nx = atoi(argv[1]);
  if (nx < 1)
    return -1;
  ny = atoi(argv[2]);
  if (ny < 1)
    return -1;
  nz = atoi(argv[3]);
  if (nz < 1)
    return -1;
  iteration = atoi(argv[4]);
  if(iteration < 1)
    return -1;

  //host data
  float *h_A0;
  float *h_Anext;
  //device
  float *d_A0;
  float *d_Anext;

  size = nx*ny*nz;

  h_A0 = (float*)malloc(sizeof(float)*size);
  h_Anext = (float*)malloc(sizeof(float)*size);

  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  FILE *fp = fopen(parameters->inpFiles[0], "rb");
  read_data(h_A0, nx, ny, nz, fp);
  fclose(fp);
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);

  //memory allocation
  hipMalloc((void **)&d_A0, size*sizeof(float));
  hipMalloc((void **)&d_Anext, size*sizeof(float));
  hipMemset(d_Anext, 0, size*sizeof(float));

  //memory copy
  hipMemcpy(d_A0, h_A0, size*sizeof(float), hipMemcpyHostToDevice);
  hipMemcpy(d_Anext, d_A0, size*sizeof(float), hipMemcpyDeviceToDevice);
  hipDeviceSynchronize();

  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  //only use 1D thread block
  dim3 block (nx-1, 1, 1);
  dim3 grid (ny-2, nz-2, 1);

  //main execution
  pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
  for(int t = 0; t < iteration; t++) {
    hipLaunchKernelGGL(( naive_kernel), dim3(grid), dim3(block), 0, 0, c0, c1, d_A0, d_Anext, nx, ny, nz);
    float *d_temp = d_A0;
    d_A0 = d_Anext;
    d_Anext = d_temp;
  }
  CUERR // check and clear any existing errors

  float *d_temp = d_A0;
  d_A0 = d_Anext;
  d_Anext = d_temp;

  hipDeviceSynchronize();
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  hipMemcpy(h_Anext, d_Anext, size*sizeof(float), hipMemcpyDeviceToHost);
  hipDeviceSynchronize();
  hipFree(d_A0);
  hipFree(d_Anext);

  if (parameters->outFile) {
    pb_SwitchToTimer(&timers, pb_TimerID_IO);
    outputData(parameters->outFile, h_Anext, nx, ny, nz);
  }
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_A0);
  free(h_Anext);
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);

  return 0;
}
0b4e5ea68a8012a6264070a0c1282cb0ec58a88a.cu
/***************************************************************************
 *cr
 *cr            (C) Copyright 2010 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ***************************************************************************/
#include <parboil.h>
#include <stdio.h>
#include <stdlib.h>

#include "file.h"
#include "common.h"
#include "cuerr.h"
#include "kernels.cu"

static int read_data(float *A0, int nx, int ny, int nz, FILE *fp)
{
  int s = 0;
  for(int i = 0; i < nz; i++) {
    for(int j = 0; j < ny; j++) {
      for(int k = 0; k < nx; k++) {
        fread(A0 + s, sizeof(float), 1, fp);
        s++;
      }
    }
  }
  return 0;
}

int main(int argc, char** argv) {
  struct pb_TimerSet timers;
  struct pb_Parameters *parameters;

  // printf("CUDA accelerated 7 points stencil codes****\n");
  // printf("Original version by Li-Wen Chang <[email protected]> and I-Jui Sung<[email protected]>\n");
  // printf("This version maintained by Chris Rodrigues ***********\n");
  parameters = pb_ReadParameters(&argc, argv);

  pb_InitializeTimerSet(&timers);
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  //declaration
  int nx, ny, nz;
  int size;
  int iteration;
  float c0 = 1.0f/6.0f;
  float c1 = 1.0f/6.0f/6.0f;

  if (argc < 5) {
    printf("Usage: probe nx ny nz tx ty t\n"
           "nx: the grid size x\n"
           "ny: the grid size y\n"
           "nz: the grid size z\n"
           "t: the iteration time\n");
    return -1;
  }

  nx = atoi(argv[1]);
  if (nx < 1)
    return -1;
  ny = atoi(argv[2]);
  if (ny < 1)
    return -1;
  nz = atoi(argv[3]);
  if (nz < 1)
    return -1;
  iteration = atoi(argv[4]);
  if(iteration < 1)
    return -1;

  //host data
  float *h_A0;
  float *h_Anext;
  //device
  float *d_A0;
  float *d_Anext;

  size = nx*ny*nz;

  h_A0 = (float*)malloc(sizeof(float)*size);
  h_Anext = (float*)malloc(sizeof(float)*size);

  pb_SwitchToTimer(&timers, pb_TimerID_IO);
  FILE *fp = fopen(parameters->inpFiles[0], "rb");
  read_data(h_A0, nx, ny, nz, fp);
  fclose(fp);
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);

  //memory allocation
  cudaMalloc((void **)&d_A0, size*sizeof(float));
  cudaMalloc((void **)&d_Anext, size*sizeof(float));
  cudaMemset(d_Anext, 0, size*sizeof(float));

  //memory copy
  cudaMemcpy(d_A0, h_A0, size*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_Anext, d_A0, size*sizeof(float), cudaMemcpyDeviceToDevice);
  cudaThreadSynchronize();

  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);

  //only use 1D thread block
  dim3 block (nx-1, 1, 1);
  dim3 grid (ny-2, nz-2, 1);

  //main execution
  pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
  for(int t = 0; t < iteration; t++) {
    naive_kernel<<<grid, block>>>(c0, c1, d_A0, d_Anext, nx, ny, nz);
    float *d_temp = d_A0;
    d_A0 = d_Anext;
    d_Anext = d_temp;
  }
  CUERR // check and clear any existing errors

  float *d_temp = d_A0;
  d_A0 = d_Anext;
  d_Anext = d_temp;

  cudaThreadSynchronize();
  pb_SwitchToTimer(&timers, pb_TimerID_COPY);
  cudaMemcpy(h_Anext, d_Anext, size*sizeof(float), cudaMemcpyDeviceToHost);
  cudaThreadSynchronize();
  cudaFree(d_A0);
  cudaFree(d_Anext);

  if (parameters->outFile) {
    pb_SwitchToTimer(&timers, pb_TimerID_IO);
    outputData(parameters->outFile, h_Anext, nx, ny, nz);
  }
  pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
  free(h_A0);
  free(h_Anext);
  pb_SwitchToTimer(&timers, pb_TimerID_NONE);
  pb_PrintTimerSet(&timers);
  pb_FreeParameters(parameters);

  return 0;
}
fc0177a845ac5ef025aab01e2c1d9ff586892f14.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_

#include <stdio.h>
#include "2Dconvolution.h"

#define N_SIZE BLOCK_SIZE+KERNEL_SIZE-1
#define OFF KERNEL_SIZE/2

// Matrix multiplication kernel thread specification
__global__ void ConvolutionKernel(float *M, float *N, float *P, int M_h, int M_w, int N_h, int N_w)
{
    // For 5x5 kernel
    // C(i,j) = sum (m = 0 to 4) { sum(n = 0 to 4) { A[m][n] * B[i+m-2][j+n-2] } }
    // where 0 <= i < B.height and 0 <= j < B.width
    int P_h = N_h;
    int P_w = N_w;

    __shared__ float Mds[KERNEL_SIZE][KERNEL_SIZE];
    __shared__ float Nds[N_SIZE][N_SIZE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by*BLOCK_SIZE + ty;
    int col = bx*BLOCK_SIZE + tx;

    float Pvalue = 0.0;

    // Load in the kernel using a tiled approach
    for(int i = 0; i <= KERNEL_SIZE/BLOCK_SIZE; i++) {
        for(int j = 0; j <= KERNEL_SIZE/BLOCK_SIZE; j++) {
            // Check that we are loading an address inside the kernel and then load it into shared memory
            if(tx+i*BLOCK_SIZE < KERNEL_SIZE && ty+j*BLOCK_SIZE < KERNEL_SIZE) {
                Mds[ty+j*BLOCK_SIZE][tx+i*BLOCK_SIZE] = M[(ty+j*BLOCK_SIZE)*M_w + tx+i*BLOCK_SIZE];
            }
        }
    }

    // Load in KERNEL_SIZE/2 around the block using a tiled approach
    for(int i = 1; i <= (KERNEL_SIZE/2)/BLOCK_SIZE; i++) {
        for(int j = 1; j <= (KERNEL_SIZE/2)/BLOCK_SIZE; j++) {
            int xds = tx+i*BLOCK_SIZE+OFF;
            int yds = ty+j*BLOCK_SIZE+OFF;
            // First check that the index we want is a valid element of N, then check that it is needed
            // It will be needed if it fits into our Nds which is sized for BLOCK_SIZE and KERNEL_SIZE/2 on either side
            if(xds < N_SIZE && yds < N_SIZE) {
                int x = col+i*BLOCK_SIZE;
                int y = row+j*BLOCK_SIZE;
                if(x < N_w && y < N_h) {
                    // Load in the index
                    Nds[yds][xds] = N[y*N_w + x];
                } else {
                    Nds[yds][xds] = 0.0;
                }
            }
        }
    }

    // Don't do anything if we aren't operating on a valid pixel
    if(row < P_h && col < P_w) {
        // Load in entire block to shared memory
        Nds[ty+OFF][tx+OFF] = N[row*N_w + col];

        // Ensure all threads have access to the shared memory loads
        __syncthreads();

        unsigned int m_b = (row < OFF)? OFF - row : 0;
        unsigned int m_e = (row >= (N_h - OFF))? N_h - row + OFF : KERNEL_SIZE;
        unsigned int n_b = (col < OFF)? OFF - col : 0;
        unsigned int n_e = (col >= (N_w - OFF))? N_w - col + OFF : KERNEL_SIZE;

        for(int m = m_b; m < m_e; m++) {
            for(int n = n_b; n < n_e; n++) {
                Pvalue += Mds[m][n]*N[(m+row-OFF)*N_w + n+col-OFF];
                //Pvalue += Mds[m][n]*Nds[m+ty][n+tx];
            }
        }

        P[row*P_w + col] = Pvalue;
    }
}

#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
fc0177a845ac5ef025aab01e2c1d9ff586892f14.cu
#ifndef _2DCONVOLUTION_KERNEL_H_
#define _2DCONVOLUTION_KERNEL_H_

#include <stdio.h>
#include "2Dconvolution.h"

#define N_SIZE BLOCK_SIZE+KERNEL_SIZE-1
#define OFF KERNEL_SIZE/2

// Matrix multiplication kernel thread specification
__global__ void ConvolutionKernel(float *M, float *N, float *P, int M_h, int M_w, int N_h, int N_w)
{
    // For 5x5 kernel
    // C(i,j) = sum (m = 0 to 4) { sum(n = 0 to 4) { A[m][n] * B[i+m-2][j+n-2] } }
    // where 0 <= i < B.height and 0 <= j < B.width
    int P_h = N_h;
    int P_w = N_w;

    __shared__ float Mds[KERNEL_SIZE][KERNEL_SIZE];
    __shared__ float Nds[N_SIZE][N_SIZE];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by*BLOCK_SIZE + ty;
    int col = bx*BLOCK_SIZE + tx;

    float Pvalue = 0.0;

    // Load in the kernel using a tiled approach
    for(int i = 0; i <= KERNEL_SIZE/BLOCK_SIZE; i++) {
        for(int j = 0; j <= KERNEL_SIZE/BLOCK_SIZE; j++) {
            // Check that we are loading an address inside the kernel and then load it into shared memory
            if(tx+i*BLOCK_SIZE < KERNEL_SIZE && ty+j*BLOCK_SIZE < KERNEL_SIZE) {
                Mds[ty+j*BLOCK_SIZE][tx+i*BLOCK_SIZE] = M[(ty+j*BLOCK_SIZE)*M_w + tx+i*BLOCK_SIZE];
            }
        }
    }

    // Load in KERNEL_SIZE/2 around the block using a tiled approach
    for(int i = 1; i <= (KERNEL_SIZE/2)/BLOCK_SIZE; i++) {
        for(int j = 1; j <= (KERNEL_SIZE/2)/BLOCK_SIZE; j++) {
            int xds = tx+i*BLOCK_SIZE+OFF;
            int yds = ty+j*BLOCK_SIZE+OFF;
            // First check that the index we want is a valid element of N, then check that it is needed
            // It will be needed if it fits into our Nds which is sized for BLOCK_SIZE and KERNEL_SIZE/2 on either side
            if(xds < N_SIZE && yds < N_SIZE) {
                int x = col+i*BLOCK_SIZE;
                int y = row+j*BLOCK_SIZE;
                if(x < N_w && y < N_h) {
                    // Load in the index
                    Nds[yds][xds] = N[y*N_w + x];
                } else {
                    Nds[yds][xds] = 0.0;
                }
            }
        }
    }

    // Don't do anything if we aren't operating on a valid pixel
    if(row < P_h && col < P_w) {
        // Load in entire block to shared memory
        Nds[ty+OFF][tx+OFF] = N[row*N_w + col];

        // Ensure all threads have access to the shared memory loads
        __syncthreads();

        unsigned int m_b = (row < OFF)? OFF - row : 0;
        unsigned int m_e = (row >= (N_h - OFF))? N_h - row + OFF : KERNEL_SIZE;
        unsigned int n_b = (col < OFF)? OFF - col : 0;
        unsigned int n_e = (col >= (N_w - OFF))? N_w - col + OFF : KERNEL_SIZE;

        for(int m = m_b; m < m_e; m++) {
            for(int n = n_b; n < n_e; n++) {
                Pvalue += Mds[m][n]*N[(m+row-OFF)*N_w + n+col-OFF];
                //Pvalue += Mds[m][n]*Nds[m+ty][n+tx];
            }
        }

        P[row*P_w + col] = Pvalue;
    }
}

#endif // #ifndef _2DCONVOLUTION_KERNEL_H_
87292e141188debdd1a88c6a7e488db68c3276cc.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "keys.h"

#include <algorithm>
#include <random>
#include <iostream>

#include "device_launch_parameters.h"
#include <thrust/execution_policy.h>
#include "thrust/sequence.h"
#include <thrust/gather.h>
#include <thrust/sort.h>

namespace Flavors
{
	Keys::Keys(const Configuration& config, int count) :
		Store(config.Depth(), count),
		Config(config),
		Count(count)
	{
	}

	Keys::Keys(const Configuration& config, int count, unsigned* data) : Keys(config, count)
	{
		cuda::memory::copy(Store.Get(), data, Count * Depth() * sizeof(unsigned));
	}

	void Keys::FillRandom(int seed)
	{
		std::mt19937 mt(seed);
		std::vector<unsigned> randomValues(Count);
		unsigned mask = 0;

		for (int level = 0; level < Depth(); ++level)
		{
			mask = Config.Mask(level);
			std::generate(randomValues.begin(), randomValues.end(), [&mt, &mask] { return mt() & mask; });
			cuda::memory::copy(Store[level], randomValues.data(), Count * sizeof(unsigned));
		}
	}

	std::vector<std::vector<unsigned>> Keys::ToHost() const
	{
		return Store.ToHost();
	}

	std::ostream& operator<<(std::ostream& os, const Keys& obj)
	{
		auto h_store = obj.ToHost();
		for (int item = 0; item < obj.Count; ++item)
		{
			for (int level = 0; level < obj.Config.Depth(); ++level)
			{
				for (int bit = obj.Config[level] - 1; bit >= 0; --bit)
					std::cout << ((h_store[level][item] >> bit) & 1u);
				std::cout << "\t";
			}
			std::cout << std::endl;
		}
		return os;
	}

	bool operator==(const Keys& lhs, const Keys& rhs)
	{
		if (lhs.Count != rhs.Count || lhs.Config != rhs.Config)
			return false;

		auto h_lhs = lhs.ToHost();
		auto h_rhs = rhs.ToHost();

		for(int level = 0; level < lhs.Depth(); ++level)
		{
			auto cmpResult = std::mismatch(h_lhs[level].begin(), h_lhs[level].end(), h_rhs[level].begin());
			if (cmpResult.first != h_lhs[level].end())
				return false;
		}

		return lhs.Count == rhs.Count;
	}

	bool operator!=(const Keys& lhs, const Keys& rhs)
	{
		return !(lhs == rhs);
	}

	__global__ void reshape(int count, int keyLenght, int srcDepth, unsigned* srcLevels, unsigned** srcStore, int dstDepth, unsigned* dstLevels, unsigned** dstStore)
	{
		int key = blockIdx.x * blockDim.x + threadIdx.x;
		if (key >= count)
			return;

		int bit = 0;
		int srcBit = 0;
		int srcLevel = srcDepth - 1;
		int dstBit = 0;
		int dstLevel = dstDepth - 1;

		unsigned srcValue = srcStore[srcLevel][key];
		unsigned dstValue = 0;

		while(bit < keyLenght)
		{
			if((srcValue >> srcBit) & 1u)
				dstValue = dstValue | (1u << dstBit);

			++bit;
			++srcBit;
			++dstBit;

			if(dstBit == dstLevels[dstLevel])
			{
				dstStore[dstLevel][key] = dstValue;
				dstValue = 0;
				dstBit = 0;
				dstLevel--;
			}

			if(srcBit == srcLevels[srcLevel])
			{
				srcBit = 0;
				srcLevel--;
				if(srcLevel >= 0)
					srcValue = srcStore[srcLevel][key];
			}
		}
	}

	void Keys::launchReshape(Configuration& newConfig, Keys& newKeys)
	{
		auto kernelConfig = make_launch_config(Count);
		cuda::launch(
			reshape,
			kernelConfig,
			Count,
			Config.Length,
			Config.Depth(),
			Config.Levels.Get(),
			Store.GetLevels(),
			newConfig.Depth(),
			newConfig.Levels.Get(),
			newKeys.Store.GetLevels()
		);
	}

	void Keys::copyPermutation(Keys& newKeys)
	{
		if(Sorted())
		{
			newKeys.Permutation = CudaArray<unsigned>{ Count };
			cuda::memory::copy(newKeys.Permutation.Get(), Permutation.Get(), Count * sizeof(unsigned));
		}
	}

	Keys Keys::ReshapeKeys(Configuration& newConfig)
	{
		Keys newKeys{ newConfig, Count };
		launchReshape(newConfig, newKeys);
		copyPermutation(newKeys);
		return newKeys;
	}

	void Keys::Sort()
	{
		CudaArray<unsigned> tmp{ Count };
		initPermutation();
		for(int level = Depth() - 1; level >= 0; --level)
			updatePermutation(level, tmp);
		for (int level = 0; level < Depth(); ++level)
			applyPermutation(level, tmp);
	}

	__global__ void markBorders(int count, int level, unsigned** nodesBorders, unsigned** store)
	{
		int entry = blockIdx.x * blockDim.x + threadIdx.x + 1;
		if (entry < count)
		{
			if (store[level - 1][entry - 1] != store[level - 1][entry])
				nodesBorders[level][entry] = 1;
		}
	}

	Cuda2DArray Keys::Borders()
	{
		if (!Sorted())
			Sort();

		Cuda2DArray borders{ Depth(), Count };
		unsigned mark = 1u;
		cuda::memory::copy(borders[0], &mark, sizeof(unsigned));

		auto kernelConfig = make_launch_config(Count);
		for (int level = 1; level < Depth(); ++level)
		{
			cuda::memory::copy(borders[level], borders[level - 1], Count * sizeof(unsigned));
			cuda::launch(
				markBorders,
				kernelConfig,
				Count,
				level,
				borders.GetLevels(),
				Store.GetLevels());
		}

		return borders;
	}

	void Keys::initPermutation()
	{
		Permutation = CudaArray<unsigned>{ Count };
		thrust::sequence(thrust::device, Permutation.Get(), Permutation.Get() + Count);
	}

	void Keys::updatePermutation(int level, CudaArray<unsigned>& tmp)
	{
		thrust::gather(thrust::device, Permutation.Get(), Permutation.Get() + Count, Store[level], tmp.Get());
		thrust::stable_sort_by_key(thrust::device, tmp.Get(), tmp.Get() + Count, Permutation.Get());
	}

	void Keys::applyPermutation(int level, CudaArray<unsigned>& tmp)
	{
		thrust::gather(thrust::device, Permutation.Get(), Permutation.Get() + Count, Store[level], tmp.Get());
		cuda::memory::copy(Store[level], tmp.Get(), Count * sizeof(unsigned));
	}
}
87292e141188debdd1a88c6a7e488db68c3276cc.cu
#include "keys.h" #include <algorithm> #include <random> #include <iostream> #include "device_launch_parameters.h" #include <thrust/execution_policy.h> #include "thrust/sequence.h" #include <thrust/gather.h> #include <thrust/sort.h> namespace Flavors { Keys::Keys(const Configuration& config, int count) : Store(config.Depth(), count), Config(config), Count(count) { } Keys::Keys(const Configuration& config, int count, unsigned* data) : Keys(config, count) { cuda::memory::copy(Store.Get(), data, Count * Depth() * sizeof(unsigned)); } void Keys::FillRandom(int seed) { std::mt19937 mt(seed); std::vector<unsigned> randomValues(Count); unsigned mask = 0; for (int level = 0; level < Depth(); ++level) { mask = Config.Mask(level); std::generate(randomValues.begin(), randomValues.end(), [&mt, &mask] { return mt() & mask; }); cuda::memory::copy(Store[level], randomValues.data(), Count * sizeof(unsigned)); } } std::vector<std::vector<unsigned>> Keys::ToHost() const { return Store.ToHost(); } std::ostream& operator<<(std::ostream& os, const Keys& obj) { auto h_store = obj.ToHost(); for (int item = 0; item < obj.Count; ++item) { for (int level = 0; level < obj.Config.Depth(); ++level) { for (int bit = obj.Config[level] - 1; bit >= 0; --bit) std::cout << ((h_store[level][item] >> bit) & 1u); std::cout << "\t"; } std::cout << std::endl; } return os; } bool operator==(const Keys& lhs, const Keys& rhs) { if (lhs.Count != rhs.Count || lhs.Config != rhs.Config) return false; auto h_lhs = lhs.ToHost(); auto h_rhs = rhs.ToHost(); for(int level = 0; level < lhs.Depth(); ++level) { auto cmpResult = std::mismatch(h_lhs[level].begin(), h_lhs[level].end(), h_rhs[level].begin()); if (cmpResult.first != h_lhs[level].end()) return false; } return lhs.Count == rhs.Count; } bool operator!=(const Keys& lhs, const Keys& rhs) { return !(lhs == rhs); } __global__ void reshape(int count, int keyLenght, int srcDepth, unsigned* srcLevels, unsigned** srcStore, int dstDepth, unsigned* dstLevels, unsigned** dstStore) { int key = blockIdx.x * blockDim.x + threadIdx.x; if (key >= count) return; int bit = 0; int srcBit = 0; int srcLevel = srcDepth - 1; int dstBit = 0; int dstLevel = dstDepth - 1; unsigned srcValue = srcStore[srcLevel][key]; unsigned dstValue = 0; while(bit < keyLenght) { if((srcValue >> srcBit) & 1u) dstValue = dstValue | (1u << dstBit); ++bit; ++srcBit; ++dstBit; if(dstBit == dstLevels[dstLevel]) { dstStore[dstLevel][key] = dstValue; dstValue = 0; dstBit = 0; dstLevel--; } if(srcBit == srcLevels[srcLevel]) { srcBit = 0; srcLevel--; if(srcLevel >= 0) srcValue = srcStore[srcLevel][key]; } } } void Keys::launchReshape(Configuration& newConfig, Keys& newKeys) { auto kernelConfig = make_launch_config(Count); cuda::launch( reshape, kernelConfig, Count, Config.Length, Config.Depth(), Config.Levels.Get(), Store.GetLevels(), newConfig.Depth(), newConfig.Levels.Get(), newKeys.Store.GetLevels() ); } void Keys::copyPermutation(Keys& newKeys) { if(Sorted()) { newKeys.Permutation = CudaArray<unsigned>{ Count }; cuda::memory::copy(newKeys.Permutation.Get(), Permutation.Get(), Count * sizeof(unsigned)); } } Keys Keys::ReshapeKeys(Configuration& newConfig) { Keys newKeys{ newConfig, Count }; launchReshape(newConfig, newKeys); copyPermutation(newKeys); return newKeys; } void Keys::Sort() { CudaArray<unsigned> tmp{ Count }; initPermutation(); for(int level = Depth() - 1; level >= 0; --level) updatePermutation(level, tmp); for (int level = 0; level < Depth(); ++level) applyPermutation(level, tmp); } __global__ void markBorders(int 
count, int level, unsigned** nodesBorders, unsigned** store) { int entry = blockIdx.x * blockDim.x + threadIdx.x + 1; if (entry < count) { if (store[level - 1][entry - 1] != store[level - 1][entry]) nodesBorders[level][entry] = 1; } } Cuda2DArray Keys::Borders() { if (!Sorted()) Sort(); Cuda2DArray borders{ Depth(), Count }; unsigned mark = 1u; cuda::memory::copy(borders[0], &mark, sizeof(unsigned)); auto kernelConfig = make_launch_config(Count); for (int level = 1; level < Depth(); ++level) { cuda::memory::copy(borders[level], borders[level - 1], Count * sizeof(unsigned)); cuda::launch( markBorders, kernelConfig, Count, level, borders.GetLevels(), Store.GetLevels()); } return borders; } void Keys::initPermutation() { Permutation = CudaArray<unsigned>{ Count }; thrust::sequence(thrust::device, Permutation.Get(), Permutation.Get() + Count); } void Keys::updatePermutation(int level, CudaArray<unsigned>& tmp) { thrust::gather(thrust::device, Permutation.Get(), Permutation.Get() + Count, Store[level], tmp.Get()); thrust::stable_sort_by_key(thrust::device, tmp.Get(), tmp.Get() + Count, Permutation.Get()); } void Keys::applyPermutation(int level, CudaArray<unsigned>& tmp) { thrust::gather(thrust::device, Permutation.Get(), Permutation.Get() + Count, Store[level], tmp.Get()); cuda::memory::copy(Store[level], tmp.Get(), Count * sizeof(unsigned)); } }
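Keys::Sort() above is an LSD-style multi-pass sort: the permutation is stable-sorted once per key level, least significant level first, and then applied to every level. The following standalone sketch (not part of the Flavors sources; the CudaArray/Store containers are simplified to thrust::device_vector) shows the same gather + stable_sort_by_key pattern in isolation.

// Minimal sketch, assuming each entry of `levels` holds one 32-bit slice of
// every key; levels.back() is the least significant slice.
#include <thrust/device_vector.h>
#include <thrust/gather.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <vector>

void sortByLevels(std::vector<thrust::device_vector<unsigned>>& levels, int count)
{
  thrust::device_vector<unsigned> perm(count), tmp(count);
  thrust::sequence(perm.begin(), perm.end());

  // One stable pass per level; stability preserves the ordering established
  // by the previously sorted (less significant) levels.
  for (int level = (int)levels.size() - 1; level >= 0; --level) {
    thrust::gather(perm.begin(), perm.end(), levels[level].begin(), tmp.begin());
    thrust::stable_sort_by_key(tmp.begin(), tmp.end(), perm.begin());
  }

  // Apply the final permutation to every level.
  for (auto& lvl : levels) {
    thrust::gather(perm.begin(), perm.end(), lvl.begin(), tmp.begin());
    lvl = tmp;
  }
}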
3b315452673611056977ddbc85779ba2bfc96e8e.hip
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: mphoward

/*!
 * \file mpcd/ConfinedStreamingMethodGPU.cu
 * \brief Defines GPU functions and kernels used by mpcd::ConfinedStreamingMethodGPU
 *
 * \warning
 * This file needs separable compilation with ExternalFields.cu. Any plugins extending
 * the ConfinedStreamingGeometryGPU will also need to do separable compilation with
 * ExternalFields.cu.
 */

#include "ConfinedStreamingMethodGPU.cuh"
#include "StreamingGeometry.h"
#include "ExternalField.h"
#include "hoomd/GPUPolymorph.cuh"

namespace mpcd
{
namespace gpu
{

//! Template instantiation of bulk geometry streaming
template hipError_t confined_stream<mpcd::detail::BulkGeometry>
    (const stream_args_t& args, const mpcd::detail::BulkGeometry& geom);

//! Template instantiation of slit geometry streaming
template hipError_t confined_stream<mpcd::detail::SlitGeometry>
    (const stream_args_t& args, const mpcd::detail::SlitGeometry& geom);

} // end namespace gpu
} // end namespace mpcd
3b315452673611056977ddbc85779ba2bfc96e8e.cu
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.

// Maintainer: mphoward

/*!
 * \file mpcd/ConfinedStreamingMethodGPU.cu
 * \brief Defines GPU functions and kernels used by mpcd::ConfinedStreamingMethodGPU
 *
 * \warning
 * This file needs separable compilation with ExternalFields.cu. Any plugins extending
 * the ConfinedStreamingGeometryGPU will also need to do separable compilation with
 * ExternalFields.cu.
 */

#include "ConfinedStreamingMethodGPU.cuh"
#include "StreamingGeometry.h"
#include "ExternalField.h"
#include "hoomd/GPUPolymorph.cuh"

namespace mpcd
{
namespace gpu
{

//! Template instantiation of bulk geometry streaming
template cudaError_t confined_stream<mpcd::detail::BulkGeometry>
    (const stream_args_t& args, const mpcd::detail::BulkGeometry& geom);

//! Template instantiation of slit geometry streaming
template cudaError_t confined_stream<mpcd::detail::SlitGeometry>
    (const stream_args_t& args, const mpcd::detail::SlitGeometry& geom);

} // end namespace gpu
} // end namespace mpcd
dbfee151a4c9839255d0cc299cfb4ea002ecc87c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "main.h"

__global__ void multiply_gpu_tiling(double *a, double *b, double *c)
{
  __shared__ double a_tile[TILE_DIM * TILE_DIM];
  __shared__ double b_tile[TILE_DIM * TILE_DIM];

  int n_tiles = (int)ceil((float)N / (float)TILE_DIM);
  int tile_row = blockIdx.x;
  int tile_col = blockIdx.y;
  int num_row = threadIdx.x;
  int num_col = threadIdx.y;
  double result = 0;

  for (int i = 0; i < n_tiles; i++) {
    if (i * TILE_DIM + num_col < N && tile_row * TILE_DIM + num_row < N) {
      a_tile[num_row * TILE_DIM + num_col] =
          a[tile_row * N * TILE_DIM + i * TILE_DIM + N * num_row + num_col];
    } else {
      a_tile[num_row * TILE_DIM + num_col] = 0;
    }
    if (i * TILE_DIM + num_col < N && tile_col * TILE_DIM + num_row < N) {
      b_tile[num_row * TILE_DIM + num_col] =
          b[tile_col * TILE_DIM + i * N * TILE_DIM + N * num_col + num_row];
    } else {
      b_tile[num_row * TILE_DIM + num_col] = 0;
    }
    __syncthreads();
    for (int j = 0; j < TILE_DIM; j++) {
      result += a_tile[num_row * TILE_DIM + j] * b_tile[num_col * TILE_DIM + j];
    }
    __syncthreads();
  }

  if (tile_col * TILE_DIM + num_col < N && tile_row * TILE_DIM + num_row < N) {
    c[tile_row * TILE_DIM * N + tile_col * TILE_DIM + num_col + num_row * N] = result;
  }
}

__global__ void multiply_gpu(double *a, double *b, double *c)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  int j = threadIdx.y + blockIdx.y * blockDim.y;
  int idx = j * N + i;
  if (j < N && i < N) {
    double res = 0;
    for (int k = 0; k < N; k++) {
      res += a[j * N + k] * b[k * N + i];
    }
    c[idx] = res;
  }
}

int main(int argc, char *argv[])
{
  // set up device
  hipDeviceProp_t deviceProp;
  SAFE_CALL(hipGetDeviceProperties(&deviceProp, 0), "Error getting device properties");
  printf("Using device: %s\n", deviceProp.name);
  SAFE_CALL(hipSetDevice(0), "Error setting device");

  // initialize matrices on host
  double *a = (double *)calloc(N * N, sizeof(double));
  double *b = (double *)calloc(N * N, sizeof(double));
  double *c = (double *)calloc(N * N, sizeof(double));
  double *d = (double *)calloc(N * N, sizeof(double));
  double *e = (double *)calloc(N * N, sizeof(double));
  fill_matrix(a);
  fill_matrix(b);

  // assign device global memory
  double *d_a, *d_b, *d_c, *d_d;
  SAFE_CALL(hipMalloc((void **)&d_a, N * N * sizeof(double)), "Error allocating d_a");
  SAFE_CALL(hipMalloc((void **)&d_b, N * N * sizeof(double)), "Error allocating d_b");
  SAFE_CALL(hipMalloc((void **)&d_c, N * N * sizeof(double)), "Error allocating d_c");
  SAFE_CALL(hipMalloc((void **)&d_d, N * N * sizeof(double)), "Error allocating d_d");

  // transfer data from host to device
  SAFE_CALL(hipMemcpy(d_a, a, N * N * sizeof(double), hipMemcpyHostToDevice), "Error copying a");
  SAFE_CALL(hipMemcpy(d_b, b, N * N * sizeof(double), hipMemcpyHostToDevice), "Error copying b");

  // configure grid & run multiply with tiling
  dim3 block_tile(TILE_DIM, TILE_DIM);
  dim3 grid_tile((N + block_tile.x - 1) / block_tile.x, (N + block_tile.y - 1) / block_tile.y);
  auto start_gpu_tile = chrono::high_resolution_clock::now();
  hipLaunchKernelGGL(multiply_gpu_tiling, dim3(grid_tile), dim3(block_tile), 0, 0, d_a, d_b, d_c);
  SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
  auto end_gpu_tile = chrono::high_resolution_clock::now();

  // check for kernel errors
  SAFE_CALL(hipGetLastError(), "Error with last error");

  // configure grid & run multiply without tiling
  dim3 block(BLOCK_DIM, BLOCK_DIM);
  dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y);
  auto start_gpu = chrono::high_resolution_clock::now();
  hipLaunchKernelGGL(multiply_gpu, dim3(grid), dim3(block), 0, 0, d_a, d_b, d_d);
  SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel");
  auto end_gpu = chrono::high_resolution_clock::now();

  // check for kernel errors
  SAFE_CALL(hipGetLastError(), "Error with last error");

  // copy results to host
  SAFE_CALL(hipMemcpy(c, d_c, N * N * sizeof(double), hipMemcpyDeviceToHost), "Error copying c");
  SAFE_CALL(hipMemcpy(d, d_d, N * N * sizeof(double), hipMemcpyDeviceToHost), "Error copying d");

  // free device global memory
  SAFE_CALL(hipFree(d_a), "Error freeing memory");
  SAFE_CALL(hipFree(d_b), "Error freeing memory");
  SAFE_CALL(hipFree(d_c), "Error freeing memory");
  SAFE_CALL(hipFree(d_d), "Error freeing memory");

  // reset device
  SAFE_CALL(hipDeviceReset(), "Error resetting");

  // multiply on host
  multiply_cpu(a, b, e);

  // check results
  cout << "CHECKING RESULTS OBTAINED WITH TILING" << endl;
  check_result(c, e);
  cout << "CHECKING RESULTS OBTAINED WITHOUT TILING" << endl;
  check_result(d, e);

  // free host memory
  free(a);
  free(b);
  free(c);
  free(d);
  free(e);

  // print results
  chrono::duration<float, std::milli> duration_gpu_tile = end_gpu_tile - start_gpu_tile;
  chrono::duration<float, std::milli> duration_gpu = end_gpu - start_gpu;
  cout << "WITH TILING: " << duration_gpu_tile.count() << "ms" << endl;
  cout << "WITHOUT TILING: " << duration_gpu.count() << "ms" << endl;
  cout << "SPEEDUP: " << duration_gpu.count() / duration_gpu_tile.count() << endl;

  return 0;
}
dbfee151a4c9839255d0cc299cfb4ea002ecc87c.cu
#include "main.h" __global__ void multiply_gpu_tiling(double *a, double *b, double *c) { __shared__ double a_tile[TILE_DIM * TILE_DIM]; __shared__ double b_tile[TILE_DIM * TILE_DIM]; int n_tiles = (int)ceil((float)N / (float)TILE_DIM); int tile_row = blockIdx.x; int tile_col = blockIdx.y; int num_row = threadIdx.x; int num_col = threadIdx.y; double result = 0; for (int i = 0; i < n_tiles; i++) { if (i * TILE_DIM + num_col < N && tile_row * TILE_DIM + num_row < N) { a_tile[num_row * TILE_DIM + num_col] = a[tile_row * N * TILE_DIM + i * TILE_DIM + N * num_row + num_col]; } else { a_tile[num_row * TILE_DIM + num_col] = 0; } if (i * TILE_DIM + num_col < N && tile_col * TILE_DIM + num_row < N) { b_tile[num_row * TILE_DIM + num_col] = b[tile_col * TILE_DIM + i * N * TILE_DIM + N * num_col + num_row]; } else { b_tile[num_row * TILE_DIM + num_col] = 0; } __syncthreads(); for (int j = 0; j < TILE_DIM; j++) { result += a_tile[num_row * TILE_DIM + j] * b_tile[num_col * TILE_DIM + j]; } __syncthreads(); } if (tile_col * TILE_DIM + num_col < N && tile_row * TILE_DIM + num_row < N) { c[tile_row * TILE_DIM * N + tile_col * TILE_DIM + num_col + num_row * N] = result; } } __global__ void multiply_gpu(double *a, double *b, double *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; int idx = j * N + i; if (j < N && i < N) { double res = 0; for (int k = 0; k < N; k++) { res += a[j * N + k] * b[k * N + i]; } c[idx] = res; } } int main(int argc, char *argv[]) { // set up device cudaDeviceProp deviceProp; SAFE_CALL(cudaGetDeviceProperties(&deviceProp, 0), "Error getting device properties"); printf("Using device: %s\n", deviceProp.name); SAFE_CALL(cudaSetDevice(0), "Error setting device"); // initialize matrices on host double *a = (double *)calloc(N * N, sizeof(double)); double *b = (double *)calloc(N * N, sizeof(double)); double *c = (double *)calloc(N * N, sizeof(double)); double *d = (double *)calloc(N * N, sizeof(double)); double *e = (double *)calloc(N * N, sizeof(double)); fill_matrix(a); fill_matrix(b); // assign device global memory double *d_a, *d_b, *d_c, *d_d; SAFE_CALL(cudaMalloc((void **)&d_a, N * N * sizeof(double)), "Error allocating d_a"); SAFE_CALL(cudaMalloc((void **)&d_b, N * N * sizeof(double)), "Error allocating d_b"); SAFE_CALL(cudaMalloc((void **)&d_c, N * N * sizeof(double)), "Error allocating d_c"); SAFE_CALL(cudaMalloc((void **)&d_d, N * N * sizeof(double)), "Error allocating d_d"); // transfer data from host to device SAFE_CALL(cudaMemcpy(d_a, a, N * N * sizeof(double), cudaMemcpyHostToDevice), "Error copying a"); SAFE_CALL(cudaMemcpy(d_b, b, N * N * sizeof(double), cudaMemcpyHostToDevice), "Error copying b"); // configure grid & run multiply with tiling dim3 block_tile(TILE_DIM, TILE_DIM); dim3 grid_tile((N + block_tile.x - 1) / block_tile.x, (N + block_tile.y - 1) / block_tile.y); auto start_gpu_tile = chrono::high_resolution_clock::now(); multiply_gpu_tiling<<<grid_tile, block_tile>>>(d_a, d_b, d_c); SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel"); auto end_gpu_tile = chrono::high_resolution_clock::now(); // check for kernel errors SAFE_CALL(cudaGetLastError(), "Error with last error"); // configure grid & run multiply without tiling dim3 block(BLOCK_DIM, BLOCK_DIM); dim3 grid((N + block.x - 1) / block.x, (N + block.y - 1) / block.y); auto start_gpu = chrono::high_resolution_clock::now(); multiply_gpu<<<grid, block>>>(d_a, d_b, d_d); SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel"); auto end_gpu = 
chrono::high_resolution_clock::now(); // check for kernel errors SAFE_CALL(cudaGetLastError(), "Error with last error"); // copy results to host SAFE_CALL(cudaMemcpy(c, d_c, N * N * sizeof(double), cudaMemcpyDeviceToHost), "Error copying c"); SAFE_CALL(cudaMemcpy(d, d_d, N * N * sizeof(double), cudaMemcpyDeviceToHost), "Error copying d"); // free device global memory SAFE_CALL(cudaFree(d_a), "Error freeing memory"); SAFE_CALL(cudaFree(d_b), "Error freeing memory"); SAFE_CALL(cudaFree(d_c), "Error freeing memory"); SAFE_CALL(cudaFree(d_d), "Error freeing memory"); // reset device SAFE_CALL(cudaDeviceReset(), "Error resetting"); // multiply on host multiply_cpu(a, b, e); // check results cout << "CHECKING RESULTS OBTAINED WITH TILING" << endl; check_result(c, e); cout << "CHECKING RESULTS OBTAINED WITHOUT TILING" << endl; check_result(d, e); // free host memory free(a); free(b); free(c); free(d); free(e); // print results chrono::duration<float, std::milli> duration_gpu_tile = end_gpu_tile - start_gpu_tile; chrono::duration<float, std::milli> duration_gpu = end_gpu - start_gpu; cout << "WITH TILING: " << duration_gpu_tile.count() << "ms" << endl; cout << "WITHOUT TILING: " << duration_gpu.count() << "ms" << endl; cout << "SPEEDUP: " << duration_gpu.count() / duration_gpu_tile.count() << endl; return 0; }
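Both versions of this benchmark lean on fill_matrix, multiply_cpu, and check_result from main.h, which is not included in this dump (N, TILE_DIM, BLOCK_DIM, and SAFE_CALL come from there as well). A minimal sketch of what those helpers could look like follows; the bodies, the random fill, and the 1e-6 tolerance are assumptions inferred only from how the helpers are called above.

#include <cmath>
#include <cstdio>
#include <cstdlib>

// Hypothetical helpers matching the calls in main(); not the real main.h.
void fill_matrix(double *m) {
  for (int i = 0; i < N * N; i++)
    m[i] = (double)rand() / RAND_MAX;  // uniform values in [0, 1]
}

void multiply_cpu(const double *a, const double *b, double *c) {
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++) {
      double res = 0;
      for (int k = 0; k < N; k++)
        res += a[i * N + k] * b[k * N + j];
      c[i * N + j] = res;
    }
}

void check_result(const double *gpu, const double *cpu) {
  for (int i = 0; i < N * N; i++)
    if (fabs(gpu[i] - cpu[i]) > 1e-6) {  // tolerance is an assumption
      printf("MISMATCH at %d: %f vs %f\n", i, gpu[i], cpu[i]);
      return;
    }
  printf("OK\n");
}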
2980bbb26ff404d575cec2458bbf14d7443e5d9e.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transposeUnroll8Row.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv)
{
  hipSetDevice(0);
  char *p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      float *out = NULL;
      hipMalloc(&out, XSIZE * YSIZE * sizeof(float));  // fixed: was XSIZE*YSIZE bytes, not elements
      float *in = NULL;
      hipMalloc(&in, XSIZE * YSIZE * sizeof(float));   // fixed: same under-allocation
      const int nx = 1;
      const int ny = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      hipFree(0);
      hipLaunchKernelGGL(transposeUnroll8Row, dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
      hipDeviceSynchronize();
      // warm-up launches
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        hipLaunchKernelGGL(transposeUnroll8Row, dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        hipLaunchKernelGGL(transposeUnroll8Row, dim3(gridBlock), dim3(threadBlock), 0, 0, out, in, nx, ny);
      }
      hipDeviceSynchronize();  // fixed: wait for the queued kernels so the timer measures execution, not just launch overhead
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
           << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
2980bbb26ff404d575cec2458bbf14d7443e5d9e.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transposeUnroll8Row.cu"
#include <chrono>
#include <iostream>

using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv)
{
  cudaSetDevice(0);
  char *p;
  int matrix_len = strtol(argv[1], &p, 10);
  for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
    for (int block_looper = 0; block_looper < 20; block_looper++) {
      int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
      int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
      float *out = NULL;
      cudaMalloc(&out, XSIZE * YSIZE * sizeof(float));  // fixed: was XSIZE*YSIZE bytes, not elements
      float *in = NULL;
      cudaMalloc(&in, XSIZE * YSIZE * sizeof(float));   // fixed: same under-allocation
      const int nx = 1;
      const int ny = 1;
      int iXSIZE = XSIZE;
      int iYSIZE = YSIZE;
      while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
      while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
      dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
      dim3 threadBlock(BLOCKX, BLOCKY);
      cudaFree(0);
      transposeUnroll8Row<<<gridBlock, threadBlock>>>(out, in, nx, ny);
      cudaDeviceSynchronize();
      // warm-up launches
      for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
        transposeUnroll8Row<<<gridBlock, threadBlock>>>(out, in, nx, ny);
      }
      auto start = steady_clock::now();
      for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
        transposeUnroll8Row<<<gridBlock, threadBlock>>>(out, in, nx, ny);
      }
      cudaDeviceSynchronize();  // fixed: wait for the queued kernels so the timer measures execution, not just launch overhead
      auto end = steady_clock::now();
      auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
      cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')'
           << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
    }
  }
}
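The kernel under test is pulled in from transposeUnroll8Row.cu, which this dump does not contain. For orientation, here is one plausible implementation of an 8-way row-unrolled transpose, in the style of the common "unroll row" transpose exercises; it is a sketch only and not necessarily the file that was benchmarked.

// Sketch only: each thread transposes up to eight elements of one row,
// reading coalesced along rows of `in` and writing columns of `out`.
__global__ void transposeUnroll8Row(float *out, float *in, const int nx, const int ny)
{
  unsigned int ix = blockDim.x * blockIdx.x * 8 + threadIdx.x;
  unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
  unsigned int ti = iy * nx + ix;  // read offset  (element (iy, ix) of in)
  unsigned int to = ix * ny + iy;  // write offset (element (ix, iy) of out)
  if (iy < ny) {
    for (int k = 0; k < 8; ++k) {
      if (ix + k * blockDim.x < nx)
        out[to + (size_t)k * blockDim.x * ny] = in[ti + k * blockDim.x];
    }
  }
}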
c9e3ae1ff3f69bb35b1747d20c289e82a2ee2b3c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <wb.h>

#define wbCheck(stmt)                                                \
  do {                                                               \
    hipError_t err = stmt;                                           \
    if (err != hipSuccess) {                                         \
      wbLog(ERROR, "Failed to run stmt ", #stmt);                    \
      wbLog(ERROR, "Got CUDA error ... ", hipGetErrorString(err));   \
      return -1;                                                     \
    }                                                                \
  } while (0)

__host__ __device__ int outInvariant(int inValue) {
  return inValue * inValue;
}

__host__ __device__ int outDependent(int value, int inIdx, int outIdx) {
  if (inIdx == outIdx) {
    return 2 * value;
  } else if (inIdx > outIdx) {
    return value / (inIdx - outIdx);
  } else {
    return value / (outIdx - inIdx);
  }
}

__global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  unsigned int inIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (inIdx < len) { // avoid indexing outside array bounds
    int intermediate = outInvariant(in[inIdx]);
    for (int outIdx = 0; outIdx < len; ++outIdx) {
      //out[outIdx] += outDependent(intermediate, inIdx, outIdx);
      atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));
    }
  }
}

__global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  unsigned int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (outIdx < len) { // avoid indexing outside array bounds
    for (int inIdx = 0; inIdx < len; ++inIdx) {
      int intermediate = outInvariant(in[inIdx]);
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_cpu_scatter(int *in, int *out, int len) {
  for (int inIdx = 0; inIdx < len; ++inIdx) {
    int intermediate = outInvariant(in[inIdx]);
    for (int outIdx = 0; outIdx < len; ++outIdx) {
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_cpu_gather(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  for (int outIdx = 0; outIdx < len; ++outIdx) {
    for (int inIdx = 0; inIdx < len; ++inIdx) {
      int intermediate = outInvariant(in[inIdx]);
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_gpu_scatter(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  // Initialize thread block and kernel grid dimensions ---------------------
  const unsigned int BLOCK_SIZE = 32; // Use 1D blocks of BLOCK_SIZE threads (comment fixed: was "16 thread blocks")
  dim3 gridDim((len - 1) / BLOCK_SIZE + 1, 1, 1);
  dim3 blockDim(BLOCK_SIZE, 1, 1);
  // Invoke CUDA kernel -----------------------------------------------------
  hipLaunchKernelGGL(s2g_gpu_scatter_kernel, dim3(gridDim), dim3(blockDim), 0, 0, in, out, len);
}

static void s2g_gpu_gather(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  // Initialize thread block and kernel grid dimensions ---------------------
  const unsigned int BLOCK_SIZE = 32; // Use 1D blocks of BLOCK_SIZE threads (comment fixed: was "16 thread blocks")
  dim3 gridDim((len - 1) / BLOCK_SIZE + 1, 1, 1);
  dim3 blockDim(BLOCK_SIZE, 1, 1);
  // Invoke CUDA kernel -----------------------------------------------------
  hipLaunchKernelGGL(s2g_gpu_gather_kernel, dim3(gridDim), dim3(blockDim), 0, 0, in, out, len);
}

int main(int argc, char **argv) {
  wbArg_t args;
  int inputLength;
  int *hostInput;
  int *hostOutput;
  int *deviceInput;
  int *deviceOutput;
  size_t byteCount;

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
  hostOutput = (int *)malloc(inputLength * sizeof(int));
  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The input length is ", inputLength);

  byteCount = inputLength * sizeof(int);

  wbTime_start(GPU, "Allocating GPU memory.");
  wbCheck(hipMalloc((void **)&deviceInput, byteCount));
  wbCheck(hipMalloc((void **)&deviceOutput, byteCount));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  wbCheck(hipMemcpy(deviceInput, hostInput, byteCount, hipMemcpyHostToDevice));
  wbCheck(hipMemset(deviceOutput, 0, byteCount));
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  //////////////////////////////////////////
  // CPU Scatter Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing CPU Scatter computation");
  s2g_cpu_scatter(hostInput, hostOutput, inputLength);
  wbTime_stop(Compute, "Performing CPU Scatter computation");

  wbSolution(args, hostOutput, inputLength);
  memset(hostOutput, 0, byteCount);

  //////////////////////////////////////////
  // GPU Scatter Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing GPU Scatter computation");
  s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
  hipDeviceSynchronize();
  wbTime_stop(Compute, "Performing GPU Scatter computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount, hipMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbSolution(args, hostOutput, inputLength);
  wbCheck(hipMemset(deviceOutput, 0, byteCount));

  //////////////////////////////////////////
  // CPU Gather Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing CPU Gather computation");
  s2g_cpu_gather(hostInput, hostOutput, inputLength);
  wbTime_stop(Compute, "Performing CPU Gather computation");

  wbSolution(args, hostOutput, inputLength);
  memset(hostOutput, 0, byteCount);

  //////////////////////////////////////////
  // GPU Gather Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing GPU Gather computation");  // fixed label: was "GPU Scatter"
  s2g_gpu_gather(deviceInput, deviceOutput, inputLength);
  hipDeviceSynchronize();
  wbTime_stop(Compute, "Performing GPU Gather computation");   // fixed label: was "GPU Scatter"

  wbTime_start(Copy, "Copying output memory to the CPU");
  wbCheck(hipMemcpy(hostOutput, deviceOutput, byteCount, hipMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbSolution(args, hostOutput, inputLength);
  wbCheck(hipMemset(deviceOutput, 0, byteCount));

  wbTime_start(GPU, "Freeing GPU Memory");
  hipFree(deviceInput);
  hipFree(deviceOutput);
  wbTime_stop(GPU, "Freeing GPU Memory");

  free(hostInput);
  free(hostOutput);

  return 0;
}
c9e3ae1ff3f69bb35b1747d20c289e82a2ee2b3c.cu
#include <wb.h>

#define wbCheck(stmt)                                                \
  do {                                                               \
    cudaError_t err = stmt;                                          \
    if (err != cudaSuccess) {                                        \
      wbLog(ERROR, "Failed to run stmt ", #stmt);                    \
      wbLog(ERROR, "Got CUDA error ... ", cudaGetErrorString(err));  \
      return -1;                                                     \
    }                                                                \
  } while (0)

__host__ __device__ int outInvariant(int inValue) {
  return inValue * inValue;
}

__host__ __device__ int outDependent(int value, int inIdx, int outIdx) {
  if (inIdx == outIdx) {
    return 2 * value;
  } else if (inIdx > outIdx) {
    return value / (inIdx - outIdx);
  } else {
    return value / (outIdx - inIdx);
  }
}

__global__ void s2g_gpu_scatter_kernel(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  unsigned int inIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (inIdx < len) { // avoid indexing outside array bounds
    int intermediate = outInvariant(in[inIdx]);
    for (int outIdx = 0; outIdx < len; ++outIdx) {
      //out[outIdx] += outDependent(intermediate, inIdx, outIdx);
      atomicAdd(&out[outIdx], outDependent(intermediate, inIdx, outIdx));
    }
  }
}

__global__ void s2g_gpu_gather_kernel(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  unsigned int outIdx = blockIdx.x * blockDim.x + threadIdx.x;
  if (outIdx < len) { // avoid indexing outside array bounds
    for (int inIdx = 0; inIdx < len; ++inIdx) {
      int intermediate = outInvariant(in[inIdx]);
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_cpu_scatter(int *in, int *out, int len) {
  for (int inIdx = 0; inIdx < len; ++inIdx) {
    int intermediate = outInvariant(in[inIdx]);
    for (int outIdx = 0; outIdx < len; ++outIdx) {
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_cpu_gather(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  for (int outIdx = 0; outIdx < len; ++outIdx) {
    for (int inIdx = 0; inIdx < len; ++inIdx) {
      int intermediate = outInvariant(in[inIdx]);
      out[outIdx] += outDependent(intermediate, inIdx, outIdx);
    }
  }
}

static void s2g_gpu_scatter(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  // Initialize thread block and kernel grid dimensions ---------------------
  const unsigned int BLOCK_SIZE = 32; // Use 1D blocks of BLOCK_SIZE threads (comment fixed: was "16 thread blocks")
  dim3 gridDim((len - 1) / BLOCK_SIZE + 1, 1, 1);
  dim3 blockDim(BLOCK_SIZE, 1, 1);
  // Invoke CUDA kernel -----------------------------------------------------
  s2g_gpu_scatter_kernel<<<gridDim, blockDim>>>(in, out, len);
}

static void s2g_gpu_gather(int *in, int *out, int len) {
  //@@ INSERT CODE HERE
  // Initialize thread block and kernel grid dimensions ---------------------
  const unsigned int BLOCK_SIZE = 32; // Use 1D blocks of BLOCK_SIZE threads (comment fixed: was "16 thread blocks")
  dim3 gridDim((len - 1) / BLOCK_SIZE + 1, 1, 1);
  dim3 blockDim(BLOCK_SIZE, 1, 1);
  // Invoke CUDA kernel -----------------------------------------------------
  s2g_gpu_gather_kernel<<<gridDim, blockDim>>>(in, out, len);
}

int main(int argc, char **argv) {
  wbArg_t args;
  int inputLength;
  int *hostInput;
  int *hostOutput;
  int *deviceInput;
  int *deviceOutput;
  size_t byteCount;

  args = wbArg_read(argc, argv);

  wbTime_start(Generic, "Importing data and creating memory on host");
  hostInput = (int *)wbImport(wbArg_getInputFile(args, 0), &inputLength, "Integer");
  hostOutput = (int *)malloc(inputLength * sizeof(int));
  wbTime_stop(Generic, "Importing data and creating memory on host");

  wbLog(TRACE, "The input length is ", inputLength);

  byteCount = inputLength * sizeof(int);

  wbTime_start(GPU, "Allocating GPU memory.");
  wbCheck(cudaMalloc((void **)&deviceInput, byteCount));
  wbCheck(cudaMalloc((void **)&deviceOutput, byteCount));
  wbTime_stop(GPU, "Allocating GPU memory.");

  wbTime_start(GPU, "Copying input memory to the GPU.");
  wbCheck(cudaMemcpy(deviceInput, hostInput, byteCount, cudaMemcpyHostToDevice));
  wbCheck(cudaMemset(deviceOutput, 0, byteCount));
  wbTime_stop(GPU, "Copying input memory to the GPU.");

  //////////////////////////////////////////
  // CPU Scatter Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing CPU Scatter computation");
  s2g_cpu_scatter(hostInput, hostOutput, inputLength);
  wbTime_stop(Compute, "Performing CPU Scatter computation");

  wbSolution(args, hostOutput, inputLength);
  memset(hostOutput, 0, byteCount);

  //////////////////////////////////////////
  // GPU Scatter Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing GPU Scatter computation");
  s2g_gpu_scatter(deviceInput, deviceOutput, inputLength);
  cudaDeviceSynchronize();
  wbTime_stop(Compute, "Performing GPU Scatter computation");

  wbTime_start(Copy, "Copying output memory to the CPU");
  wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount, cudaMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbSolution(args, hostOutput, inputLength);
  wbCheck(cudaMemset(deviceOutput, 0, byteCount));

  //////////////////////////////////////////
  // CPU Gather Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing CPU Gather computation");
  s2g_cpu_gather(hostInput, hostOutput, inputLength);
  wbTime_stop(Compute, "Performing CPU Gather computation");

  wbSolution(args, hostOutput, inputLength);
  memset(hostOutput, 0, byteCount);

  //////////////////////////////////////////
  // GPU Gather Computation
  //////////////////////////////////////////
  wbTime_start(Compute, "Performing GPU Gather computation");  // fixed label: was "GPU Scatter"
  s2g_gpu_gather(deviceInput, deviceOutput, inputLength);
  cudaDeviceSynchronize();
  wbTime_stop(Compute, "Performing GPU Gather computation");   // fixed label: was "GPU Scatter"

  wbTime_start(Copy, "Copying output memory to the CPU");
  wbCheck(cudaMemcpy(hostOutput, deviceOutput, byteCount, cudaMemcpyDeviceToHost));
  wbTime_stop(Copy, "Copying output memory to the CPU");

  wbSolution(args, hostOutput, inputLength);
  wbCheck(cudaMemset(deviceOutput, 0, byteCount));

  wbTime_start(GPU, "Freeing GPU Memory");
  cudaFree(deviceInput);
  cudaFree(deviceOutput);
  wbTime_stop(GPU, "Freeing GPU Memory");

  free(hostInput);
  free(hostOutput);

  return 0;
}
774af1b2977028bfa24956ae9373740aeabc261e.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <string.h>

__global__ void CountSort(int*, int*, int, int);

__host__ void counting_sort(int* arr, int size, int max_val)
{
  int block_num = 4;
  int thread_num_per_block = 1000;
  uint64_t histo_size = sizeof(int) * max_val * block_num;

  printf("size: %d\n", size);
  printf("max_val: %d\n", max_val);
  printf("block_num: %d\n", block_num);
  printf("thread_per_block: %d\n", thread_num_per_block);

  int* dhisto;
  hipMalloc(&dhisto, histo_size);
  hipMemset(dhisto, 0, histo_size);

  int* darr;
  hipMalloc(&darr, sizeof(int) * size);
  hipMemcpy(darr, arr, sizeof(int) * size, hipMemcpyHostToDevice);

  printf("countsort start\n");
  hipLaunchKernelGGL(CountSort, dim3(block_num), dim3(thread_num_per_block), 0, 0, darr, dhisto, size, max_val);
  printf("countsort end\n");

  int* histo = (int*)calloc(max_val, sizeof(int));
  hipMemcpy(histo, dhisto, sizeof(int) * max_val, hipMemcpyDeviceToHost);

  /*
  int cnt = 0;
  for(int i=0; i<max_val; i++) {
    cnt += histo[i];
  }
  printf("cnt: %d\n", cnt);
  */

  int idx = 0;
  for (int i = 0; i < max_val; i++) {
    for (int j = 0; j < histo[i]; j++) {
      arr[idx++] = i;
    }
  }

  //hipFree(dhisto);
  //hipFree(darr);
  //free(histo);
}

__global__ void CountSort(int* darr, int* dhisto, int size, int max_val)
{
  int thread_per_block = blockDim.x;
  int total_block = gridDim.x;
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  uint64_t size_per_block, bstart, size_per_thread, start, end;

  // update histogram in each block
  if (size % total_block != 0 && bid == total_block - 1) {
    size_per_block = size / total_block + size % total_block;
    bstart = bid * (size / total_block);
    size_per_thread = size_per_block / thread_per_block;
    start = bstart + tid * size_per_thread;
    end = start + size_per_thread;
    if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
      end += size_per_block % thread_per_block;
    }
  } else {
    size_per_block = size / total_block;
    bstart = bid * size_per_block;
    size_per_thread = size_per_block / thread_per_block;
    start = bstart + tid * size_per_thread;
    end = start + size_per_thread;
    if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
      end += size_per_block % thread_per_block;
    }
  }

  for (uint64_t i = start; i < end; i++) {
    atomicAdd(&dhisto[(uint64_t)(darr[i] + bid * max_val)], 1);
  }
  __syncthreads();

  // merge the per-block histograms into block 0's region
  size_per_block = max_val;
  bstart = bid * size_per_block;
  size_per_thread = size_per_block / thread_per_block;
  start = bstart + tid * size_per_thread;
  end = start + size_per_thread;
  if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
    end += size_per_block % thread_per_block;
  }

  if (bid != 0) {
    for (uint64_t i = start; i < end; i++) {
      atomicAdd(&dhisto[i % max_val], dhisto[i]);
    }
  }
  __syncthreads();
}
774af1b2977028bfa24956ae9373740aeabc261e.cu
#include <cuda.h>
#include <stdio.h>
#include <string.h>

__global__ void CountSort(int*, int*, int, int);

__host__ void counting_sort(int* arr, int size, int max_val)
{
  int block_num = 4;
  int thread_num_per_block = 1000;
  uint64_t histo_size = sizeof(int) * max_val * block_num;

  printf("size: %d\n", size);
  printf("max_val: %d\n", max_val);
  printf("block_num: %d\n", block_num);
  printf("thread_per_block: %d\n", thread_num_per_block);

  int* dhisto;
  cudaMalloc(&dhisto, histo_size);
  cudaMemset(dhisto, 0, histo_size);

  int* darr;
  cudaMalloc(&darr, sizeof(int) * size);
  cudaMemcpy(darr, arr, sizeof(int) * size, cudaMemcpyHostToDevice);

  printf("countsort start\n");
  CountSort<<<block_num, thread_num_per_block>>>(darr, dhisto, size, max_val);
  printf("countsort end\n");

  int* histo = (int*)calloc(max_val, sizeof(int));
  cudaMemcpy(histo, dhisto, sizeof(int) * max_val, cudaMemcpyDeviceToHost);

  /*
  int cnt = 0;
  for(int i=0; i<max_val; i++) {
    cnt += histo[i];
  }
  printf("cnt: %d\n", cnt);
  */

  int idx = 0;
  for (int i = 0; i < max_val; i++) {
    for (int j = 0; j < histo[i]; j++) {
      arr[idx++] = i;
    }
  }

  //cudaFree(dhisto);
  //cudaFree(darr);
  //free(histo);
}

__global__ void CountSort(int* darr, int* dhisto, int size, int max_val)
{
  int thread_per_block = blockDim.x;
  int total_block = gridDim.x;
  int bid = blockIdx.x;
  int tid = threadIdx.x;
  uint64_t size_per_block, bstart, size_per_thread, start, end;

  // update histogram in each block
  if (size % total_block != 0 && bid == total_block - 1) {
    size_per_block = size / total_block + size % total_block;
    bstart = bid * (size / total_block);
    size_per_thread = size_per_block / thread_per_block;
    start = bstart + tid * size_per_thread;
    end = start + size_per_thread;
    if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
      end += size_per_block % thread_per_block;
    }
  } else {
    size_per_block = size / total_block;
    bstart = bid * size_per_block;
    size_per_thread = size_per_block / thread_per_block;
    start = bstart + tid * size_per_thread;
    end = start + size_per_thread;
    if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
      end += size_per_block % thread_per_block;
    }
  }

  for (uint64_t i = start; i < end; i++) {
    atomicAdd(&dhisto[(uint64_t)(darr[i] + bid * max_val)], 1);
  }
  __syncthreads();

  // merge the per-block histograms into block 0's region
  size_per_block = max_val;
  bstart = bid * size_per_block;
  size_per_thread = size_per_block / thread_per_block;
  start = bstart + tid * size_per_thread;
  end = start + size_per_thread;
  if (size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
    end += size_per_block % thread_per_block;
  }

  if (bid != 0) {
    for (uint64_t i = start; i < end; i++) {
      atomicAdd(&dhisto[i % max_val], dhisto[i]);
    }
  }
  __syncthreads();
}
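counting_sort above is a plain __host__ entry point: it builds per-block histograms on the device, merges them into block 0's region, and rebuilds the array on the host. A small driver like the following (hypothetical; not part of the file, and assumed to be compiled and linked together with it) exercises it end to end. Values must lie in [0, max_val).

#include <stdio.h>
#include <stdlib.h>

void counting_sort(int* arr, int size, int max_val);  // defined in the file above

int main()
{
  const int size = 1 << 20, max_val = 1000;
  int* arr = (int*)malloc(size * sizeof(int));
  for (int i = 0; i < size; i++) arr[i] = rand() % max_val;

  counting_sort(arr, size, max_val);

  // verify the result is non-decreasing
  for (int i = 1; i < size; i++) {
    if (arr[i - 1] > arr[i]) { printf("not sorted at %d\n", i); return 1; }
  }
  printf("sorted\n");
  free(arr);
  return 0;
}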
8e4aeda8ecbc361da069fd0decfd30281522c146.hip
// !!! This is a file automatically generated by hipify!!!
#include "gpus/cuda_handle_error.h"
//#include "gpus/cusparse_spmm.h"
#include "gpus/gpu_csr_kernel.h"
#include "gpus/timer.h"
#include "tools/ntimer.h"
#include "sort_network.cuh"
//#include "large.cuh"
#include "radix_sort.cuh"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <thrust/device_ptr.h>
#include <thrust/scan.h>
#include <thrust/remove.h>
#include <thrust/count.h>
#include "gnnz.cuh"
#include "gspgemm.cuh"
#include <assert.h>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/sort.h>
#include <thrust/transform.h>
//#include <thrust/make_zip_iterator.h>
//#include <thrust/sort_by_key.h>
//#include <thrust/make_tuple.h>
#include <thrust/tuple.h>
#include "moderngpu.cuh"
#include <inttypes.h>

#define ENABLE_DEBUG 0
#define ENABLE_PRINT 0

using namespace std;
using namespace mgpu;

// average size (in flops) of one sort segment
const int FLOPS_SORT = 1024;

struct pack {
  template <typename Tuple>
  __device__ __host__ int64 operator()(const Tuple &t) {
    return (static_cast<int64>(thrust::get<0>(t)) << 32) | thrust::get<1>(t);
  }
  /*
  __device__ __host__ int32 operator()(const Tuple &t){
    return ( ( thrust::get<0>(t) ) << 16 ) | thrust::get<1>(t);
  }
  */
};

struct unpack {
  __device__ __host__ thrust::tuple<int, int> operator()(int64 p) {
    int d = static_cast<int>(p >> 32);
    int s = static_cast<int>(p & 0xffffffff);
    return thrust::make_tuple(d, s);
  }
  /*
  __device__ __host__ thrust::tuple<int,int> operator()(int32 p){
    int d = static_cast<int>(p >> 16);
    int s = static_cast<int>(p & 0xffff);
    return thrust::make_tuple(d, s);
  }
  */
};

template<int BLOCK_THREADS>
__global__ void compute_sorting_pointers(int flops_sort_arr[], int r_c_size, int segment_size, int* rowStream)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int flop;
  for (int i = tid; i < segment_size; i += blockDim.x * gridDim.x) {
    flop = FLOPS_SORT * (i + 1);
    //printf("flop : %d", flop);
    if (flop >= r_c_size) {
      flops_sort_arr[i] = -1;
      continue;  // fixed: without this, rowStream[flop] below reads out of bounds
    }
    int cur_row = rowStream[flop - 1];
    int next_row = rowStream[flop];
    //printf("cur_row %d next_row %d\n ", cur_row, next_row);
    while (flop < r_c_size && next_row == cur_row) {
      flop++;
      cur_row = next_row;
      if (flop < r_c_size)  // fixed: guard the read when flop just reached r_c_size
        next_row = rowStream[flop];
    }
    if (flop >= r_c_size)
      flops_sort_arr[i] = -1;
    else
      flops_sort_arr[i] = flop;
  }
}

/*
void sort_data(int size, thrust::devicevector<int> d_rows, thrust::devicevector<int> d_cols, thrust::devicevector<int> d_vals){
  thrust::device_vector<int64> tmp(size);
  // Pack (day, site) pairs into 64-bit integers.
  thrust::transform(
    thrust::make_zip_iterator(thrust::make_tuple(d_rows.begin(), d_cols.begin())),
    thrust::make_zip_iterator(thrust::make_tuple(d_rows.end(), d_cols.end())),
    tmp.begin(),
    pack());
  // Sort using the 64-bit integers as keys.
  thrust::sort_by_key(tmp.begin(), tmp.end(), d_vals.begin());
  // Unpack (row,cols) pairs from 64-bit integers.
  thrust::transform(
    tmp.begin(),
    tmp.end(),
    thrust::make_zip_iterator(thrust::make_tuple(d_rows.begin(), d_cols.begin())),
    unpack());
}
*/

template<class T>
__device__ void partition_by_bit(unsigned *keys, T* values, unsigned bit, int end)
{
  unsigned int i = threadIdx.x;
  unsigned int size = blockDim.x;
  unsigned k_i = keys[i];
  T v_i = values[i];
  unsigned int p_i = (k_i >> bit) & 1;
  keys[i] = p_i;
  __syncthreads();
  unsigned int T_before = plus_scan(keys);
  unsigned int T_total = keys[size - 1];
  unsigned int F_total = size - T_total;
  __syncthreads();
  if (p_i) {
    keys[T_before - 1 + F_total] = k_i;
    values[T_before - 1 + F_total] = v_i;
  } else {
    keys[i - T_before] = k_i;
    values[i - T_before] = v_i;
  }
}

__global__ void get_segment_heads_for_CSR_reduction(const int64_t d_row_cols_sorted[], const QValue d_vals[],
                                                    int segment_heads_for_CSR_reduction[], int size)
{
  int num_threads = blockDim.x;
  int tid = threadIdx.x;
  int start = (blockIdx.x) * size;
  int end = (blockIdx.x + 1) * size;
  /*if(tid == 0 && start == 0){
    segment_heads_for_CSR_reduction[0] = 1;
    tid += num_threads;
  } */
  for (int i = start + tid; i < end; i += num_threads) {
    if (d_row_cols_sorted[i] != d_row_cols_sorted[i - 1]) {
      segment_heads_for_CSR_reduction[i] = i;
      //printf("i == %d flags[i] = %d\n",i, flags[i]);
    }
  }
}

__global__ void create_flags(const int64_t d_row_cols_sorted[], const QValue d_vals[],
                             const int d_prefix_sum_flags[], int flags[], int size)
{
  int num_threads = blockDim.x;
  int tid = threadIdx.x;
  int start = (blockIdx.x) * size;
  int end = (blockIdx.x + 1) * size;
  if (tid == 0 && start == 0) {
    flags[0] = 1;
    tid += num_threads;
  }
  for (int i = start + tid; i < end; i += num_threads) {
    if (d_row_cols_sorted[i] != d_row_cols_sorted[i - 1]) {
      flags[i] = 1;
      //printf("i == %d flags[i] = %d\n",i, flags[i]);
    }
  }
}

void SegSortPairs(CudaContext& context, int size_stream, int* rowStream, int* colStream,
                  QValue* valueStream, int* segment_head_offsets, int NumSegs)
{
  thrust::device_ptr<int> drowStream(rowStream);
  thrust::device_ptr<int> dcolStream(colStream);
  thrust::device_vector<int64_t> rows_cols_packed(size_stream);
  thrust::transform(
    thrust::make_zip_iterator(thrust::make_tuple(drowStream, dcolStream)),
    thrust::make_zip_iterator(thrust::make_tuple(drowStream + size_stream, dcolStream + size_stream)),
    rows_cols_packed.begin(),
    pack());
  //TODO: Decide whether to delete rowstream and colstream or whether to just reuse this memory in the unpacked rows and cols?
  //Maybe could consider deleting and recreating because sizeof(rowstream) == total #flops and size of
  //final unpacked rowstream == total #nnzs in C. There could be a huge diff between the 2.
  //int64_t *keys_device = thrust::raw_pointer_cast(rows_cols_packed.data());
  thrust::host_vector<int64_t> rows_cols_packed_host = rows_cols_packed;
  int64_t *keys_host = &rows_cols_packed_host[0];
  MGPU_MEM(int64_t) mgpu_rows_cols_packed = context.Malloc(keys_host, size_stream);
  MGPU_MEM(QValue) values = context.Malloc(valueStream, size_stream);
  MGPU_MEM(int) segments = context.Malloc(segment_head_offsets, NumSegs);

#if ENABLE_DEBUG
  printf("\n\nSEG-SORT PAIRS STARTING:\n\n");
  cout << "ROWS_COLS_PACKED (KEYS):\n";
  for (int i = 0; i < rows_cols_packed_host.size(); i++) {
    cout << keys_host[i] << " ";
  }
  cout << endl;
  cout << "VALUES: \n";
  PrintArray(*values, "%9f", 10);
  cout << "total_num_segments" << NumSegs << endl;
  printf("Input keys:\n");
  HANDLE_ERROR(hipMemcpy(keys_host, mgpu_rows_cols_packed->get(), size_stream * sizeof(int64_t), hipMemcpyDeviceToHost));
  for (int i = 0; i < size_stream; i++) {
    cout << keys_host[i] << " ";
  }
  cout << endl;
  printf("\nSegment heads:\n");
  //PrintArray(*segments, "%4d", 10);
#endif

  // Sort within segments.
  SegSortPairsFromIndices(mgpu_rows_cols_packed->get(), values->get(), size_stream, segments->get(), NumSegs, context);

#if ENABLE_DEBUG
  printf("\nSorted keys :\n");
  HANDLE_ERROR(hipMemcpy(keys_host, mgpu_rows_cols_packed->get(), size_stream * sizeof(int64_t), hipMemcpyDeviceToHost));
  for (int i = 0; i < rows_cols_packed_host.size(); i++) {
    cout << keys_host[i] << " ";
  }
  cout << endl;
  printf("\nSorted values :\n");
  PrintArray(*values, "%9f", 10);
#endif

  MGPU_MEM(int64_t) mgpu_reduced_rows_cols_packed = context.Malloc<int64_t>(size_stream);
  MGPU_MEM(QValue) mgpu_reduced_vals = context.Malloc<QValue>(size_stream);

  // reduce on rows_cols_packed; numSegments = number of unique (r,c) values
  int numSegments;
  ReduceByKey(mgpu_rows_cols_packed->get(), values->get(), size_stream,
              QValue(0.0), mgpu::plus<QValue>(), mgpu::equal_to<int64_t>(),
              mgpu_reduced_rows_cols_packed->get(), mgpu_reduced_vals->get(),
              &numSegments, (int*)0, context);

#if ENABLE_DEBUG
  printf("\nReduced keys:\n");
  //PrintArray(*keysDestDevice, numSegments, "%4f", 10);
  HANDLE_ERROR(hipMemcpy(keys_host, mgpu_reduced_rows_cols_packed->get(), numSegments * sizeof(int64_t), hipMemcpyDeviceToHost));
  for (int i = 0; i < numSegments; i++) {
    cout << keys_host[i] << " ";
  }
  cout << endl;
  printf("\nReduced values:\n");
  PrintArray(*mgpu_reduced_vals, numSegments, "%4f", 10);
#endif

  // unpack the reduced keys back into rows and cols
  int64_t* d_reduced_rows_cols_packed = mgpu_reduced_rows_cols_packed->get();
  thrust::device_ptr<int64_t> thrust_reduced_rows_cols_packed(d_reduced_rows_cols_packed);
  thrust::transform(
    thrust_reduced_rows_cols_packed,
    thrust_reduced_rows_cols_packed + numSegments,
    thrust::make_zip_iterator(thrust::make_tuple(drowStream, dcolStream)),
    unpack());

#if ENABLE_PRINT
  int *hrowStream = NULL;
  int *hcolStream = NULL;
  QValue* hvalStream = NULL;
  hrowStream = (int*) malloc(numSegments * sizeof(int));
  hcolStream = (int*) malloc(numSegments * sizeof(int));
  hvalStream = (QValue*) malloc(numSegments * sizeof(QValue));
  HANDLE_ERROR(hipMemcpy(hrowStream, rowStream, numSegments * sizeof(int), hipMemcpyDeviceToHost));
  HANDLE_ERROR(hipMemcpy(hcolStream, colStream, numSegments * sizeof(int), hipMemcpyDeviceToHost));
  HANDLE_ERROR(hipMemcpy(hvalStream, mgpu_reduced_vals->get(), numSegments * sizeof(QValue), hipMemcpyDeviceToHost));
  for (int i = 0; i < (numSegments); i++) {
    printf("row : %d ,col : %d, val : %f\n", hrowStream[i], hcolStream[i], hvalStream[i]);
  }
#endif
  //need to handle bin 7
}

template <typename T>
struct is_odd : public thrust::unary_function<T, bool> {
  __host__ __device__ bool operator()(T x) { return x % 2; }
};

CSR sgpuSpMMWrapper(const CSR &dA, const CSR &dB, int *drowIds, const vector<int> &hv, int *dflops)
{
  CSR dC;
  int *rowStream, *colStream;
  QValue *valueStream;
  int m = dA.rows;
  int *flops = new int[1];
  HANDLE_ERROR(hipMemcpy(flops, dflops + m, sizeof(int), hipMemcpyDeviceToHost));  // fixed: was a magic '4'
  HANDLE_ERROR(hipMalloc((void**)&rowStream, *flops * sizeof(int)));
  HANDLE_ERROR(hipMalloc((void**)&colStream, *flops * sizeof(int)));
  HANDLE_ERROR(hipMalloc((void**)&valueStream, *flops * sizeof(QValue)));
  //printf("total flops :%d\n",*flops);
  gpu_compute_stream(dA, dB, drowIds, hv, rowStream, colStream, valueStream, dflops);
  HANDLE_ERROR(hipGetLastError());

  // printing for checking correctness
#if ENABLE_DEBUG
  int *hrowStream = NULL;
  int *hcolStream = NULL;
  QValue* hvalStream = NULL;
  hrowStream = (int*) malloc(*flops * sizeof(int));
  hcolStream = (int*) malloc(*flops * sizeof(int));
  hvalStream = (QValue*) malloc(*flops * sizeof(QValue));
  HANDLE_ERROR(hipMemcpy(hrowStream, rowStream, *flops * sizeof(int), hipMemcpyDeviceToHost));
  HANDLE_ERROR(hipMemcpy(hcolStream, colStream, *flops * sizeof(int), hipMemcpyDeviceToHost));
  HANDLE_ERROR(hipMemcpy(hvalStream, valueStream, *flops * sizeof(QValue), hipMemcpyDeviceToHost));
  for (int i = 0; i < (*flops); i++) {
    printf("row : %d ,col : %d , val: %f\n", hrowStream[i], hcolStream[i], hvalStream[i]);
  }
#endif

  int total_num_segments = ((*flops + FLOPS_SORT - 1) / FLOPS_SORT) - 1;  // represents the size of the array
  printf("total number of segments : %d\n", total_num_segments);
  //printf("total flops : %d\n" , (hv[7] - hv[2]));
  int *d_segment_heads;
  if (total_num_segments > 0) {
    HANDLE_ERROR(hipMalloc((void**)&d_segment_heads, total_num_segments * sizeof(int)));
    const int BLOCK_THREADS = 256;
    const unsigned NBLOCKS = qmin(65535, (m + BLOCK_THREADS - 1) / BLOCK_THREADS);
    hipLaunchKernelGGL((compute_sorting_pointers<BLOCK_THREADS>), dim3(NBLOCKS), dim3(BLOCK_THREADS), 0, 0,
                       d_segment_heads, *flops, total_num_segments, rowStream);
  }

  /* printing for checking correctness */
  // prints the segment heads
  int *h_segment_heads = NULL;
  h_segment_heads = (int*) malloc(total_num_segments * sizeof(int));
  HANDLE_ERROR(hipMemcpy(h_segment_heads, d_segment_heads, total_num_segments * sizeof(int), hipMemcpyDeviceToHost));
  int idx_of_last_minus1;
  for (idx_of_last_minus1 = total_num_segments - 1;
       idx_of_last_minus1 > 0 && (h_segment_heads[idx_of_last_minus1] == -1);
       idx_of_last_minus1--) {
  }
  cout << "idx_of_last_minus1 = " << idx_of_last_minus1 << endl;

#if ENABLE_DEBUG
  for (int i = 0; i < total_num_segments; i++) {
    printf("segment head %d :%d\n", i, h_segment_heads[i]);
  }
#endif

  /*
    flops array - prefix sum of flops per row after rows are sorted in ascending order of flops.
    Note that flops[0] will always be 0.
    Also, flops array stores all the rows that have 0 flops also.
  */
  // d_segment_heads contains my segment heads
  /*
    hv array - contains info about starting indices of each bin in flops array.
    There are at most 7 bins in total and total size of hv is at most 9 (first 2 elements are dummy).
    Starting index of Bin #i (where i ranges from 1..7) is located in hv[i+1]. This value will point
    to the index in flops array containing the first non-zero value (flops is sorted so starting
    elements may be 0 - see above info about flops array for more details).
    hv[8] -> index where bin 7 starts in flops array
  */
  // Pack (day, site) pairs into 64-bit integers.
  //thrust::device_ptr<int> dvalueStream(valueStream);
  //sort_data(*flops,drowStream,dcolStream,dvalueStream);

  //TODO handle segment array size as 0 and for -1, Also finally this method should return the final CSR
  ContextPtr context = CreateCudaDevice(0);
  // reducing total_num_segments as the trailing elements of d_segment_heads contain -1:
  // total_num_segments = idx_of_last_minus1 + 1
  SegSortPairs(*context, *flops, rowStream, colStream, valueStream, d_segment_heads, idx_of_last_minus1 + 1);

  //get_segment_heads_for_CSR_reduction(const int64_t d_row_cols_sorted[], const QValue d_vals[], int segment_heads_for_CSR_reduction[], int size);
  //Iterator values_end = thrust::remove_if(values.begin(), values.end(), is_odd<int>());
  // since the values after values_end are garbage, we'll resize the vector
  //values.resize(values_end - values.begin());
  //uncomment later
  //create_flags<<<NBLOCKS, NTHREADS>>>(d_cols_sorted, d_vals, d_prefix_sum_flags, d_flags, N/NBLOCKS);
  /*
  thrust::device_ptr<int> dIC = thrust::device_pointer_cast(dC.rowPtr);
  thrust::exclusive_scan(dIC, dIC + m + 1, dIC);
  int cNnz = dIC[m];
  printf("total number of nnz %d", cNnz);
  HANDLE_ERROR(hipMalloc((void**)&dC.colInd, cNnz * sizeof(int)));
  HANDLE_ERROR(hipMalloc((void**)&dC.values, cNnz * sizeof(QValue)));
  // performing the computation in matrix -- kernel.cu
  //sgpu_SpGEMM(dA, dB, drowIds, hv, dC, cNnz, temp_C_128_id, temp_C_128_val, temp_C_256_id, temp_C_256_val, temp_C_512_id, temp_C_512_val, temp_C_1024_id, temp_C_1024_val);
  hipDeviceSynchronize();
  dC.nnz = cNnz;
  dC.rows = dA.rows;
  dC.cols = dB.cols;
  */
  return dC;
}
8e4aeda8ecbc361da069fd0decfd30281522c146.cu
#include "gpus/cuda_handle_error.h" //#include "gpus/cusparse_spmm.h" #include "gpus/gpu_csr_kernel.h" #include "gpus/timer.h" #include "tools/ntimer.h" #include "sort_network.cuh" //#include "large.cuh" #include "radix_sort.cuh" #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <thrust/device_ptr.h> #include <thrust/scan.h> #include <thrust/remove.h> #include <thrust/count.h> #include "gnnz.cuh" #include "gspgemm.cuh" #include <assert.h> #include <vector> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/sequence.h> #include <thrust/sort.h> #include <thrust/transform.h> //#include <thrust/make_zip_iterator.h> //#include <thrust/sort_by_key.h> //#include <thrust/make_tuple.h> #include <thrust/tuple.h> #include "moderngpu.cuh" #include <inttypes.h> #define ENABLE_DEBUG 0 #define ENABLE_PRINT 0 using namespace std; using namespace mgpu; //variable that gives average size of segment? const int FLOPS_SORT = 1024; struct pack{ template <typename Tuple> __device__ __host__ int64 operator()(const Tuple &t){ return ( static_cast<int64>( thrust::get<0>(t) ) << 32 ) | thrust::get<1>(t); } /* __device__ __host__ int32 operator()(const Tuple &t){ return ( ( thrust::get<0>(t) ) << 16 ) | thrust::get<1>(t); } */}; struct unpack{ __device__ __host__ thrust::tuple<int,int> operator()(int64 p){ int d = static_cast<int>(p >> 32); int s = static_cast<int>(p & 0xffffffff); return thrust::make_tuple(d, s); } /* __device__ __host__ thrust::tuple<int,int> operator()(int32 p){ int d = static_cast<int>(p >> 16); int s = static_cast<int>(p & 0xffff); return thrust::make_tuple(d, s); } */}; template<int BLOCK_THREADS> __global__ void compute_sorting_pointers(int flops_sort_arr[], int r_c_size, int segment_size, int* rowStream) { int tid = threadIdx.x + blockIdx.x * blockDim.x; int flop; for(int i=tid; i<segment_size ; i+=blockDim.x * gridDim.x) { flop = FLOPS_SORT * (i+1); //printf("flop : %d", flop); if(flop >= r_c_size) { flops_sort_arr[i] = -1;} int cur_row = rowStream[flop-1]; int next_row = rowStream[flop]; //printf("cur_row %d next_row %d\n ", cur_row, next_row); while(flop<r_c_size && next_row == cur_row) { flop++; cur_row = next_row; next_row = rowStream[flop]; } if(flop >= r_c_size) flops_sort_arr[i] = -1; else flops_sort_arr[i] = flop; } } /* void sort_data(int size,thrust::devicevector<int> d_rows,thrust::devicevector<int> d_cols,thrust::devicevector<int> d_vals){ thrust::device_vector<int64> tmp(size); // Pack (day, site) pairs into 64-bit integers. thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(d_rows.begin(), d_cols.begin())), thrust::make_zip_iterator(thrust::make_tuple(d_rows.end(), d_cols.end())), tmp.begin(), pack()); // Sort using the 64-bit integers as keys. thrust::sort_by_key(tmp.begin(), tmp.end(), d_vals.begin()); // Unpack (row,cols) pairs from 64-bit integers. 
thrust::transform( tmp.begin(), tmp.end(), thrust::make_zip_iterator(thrust::make_tuple(d_rows.begin(), d_cols.begin())), unpack()); } */ template<class T> __device__ void partition_by_bit(unsigned *keys, T* values, unsigned bit, int end) { unsigned int i = threadIdx.x; unsigned int size = blockDim.x; unsigned k_i = keys[i]; T v_i = values[i]; unsigned int p_i = (k_i >> bit) & 1; keys[i] = p_i; __syncthreads(); unsigned int T_before = plus_scan(keys); unsigned int T_total = keys[size-1]; unsigned int F_total = size - T_total; __syncthreads(); if (p_i) { keys[T_before - 1 + F_total] = k_i; values[T_before - 1 + F_total] = v_i; } else { keys[i - T_before] = k_i; values[i - T_before] = v_i; } } __global__ void get_segment_heads_for_CSR_reduction(const int64_t d_row_cols_sorted[], const QValue d_vals[], int segment_heads_for_CSR_reduction[], int size){ int num_threads = blockDim.x; int tid = threadIdx.x; int start = (blockIdx.x)*size; int end = (blockIdx.x+1)*size; /*if(tid == 0 && start == 0){ segment_heads_for_CSR_reduction[0] = 1; tid += num_threads; } */ for(int i = start + tid; i<end;i+=num_threads){ if(d_row_cols_sorted[i] != d_row_cols_sorted[i-1]){ segment_heads_for_CSR_reduction[i] = i; //printf("i == %d flags[i] = %d\n",i, flags[i]); } } } __global__ void create_flags(const int64_t d_row_cols_sorted[], const QValue d_vals[], const int d_prefix_sum_flags[], int flags[], int size){ int num_threads = blockDim.x; int tid = threadIdx.x; int start = (blockIdx.x)*size; int end = (blockIdx.x+1)*size; if(tid == 0 && start == 0){ flags[0] = 1; tid += num_threads; } for(int i = start + tid; i<end;i+=num_threads){ if(d_row_cols_sorted[i] != d_row_cols_sorted[i-1]){ flags[i] = 1; //printf("i == %d flags[i] = %d\n",i, flags[i]); } } } void SegSortPairs(CudaContext& context, int size_stream, int* rowStream, int* colStream, QValue* valueStream, int* segment_head_offsets, int NumSegs) { thrust::device_ptr<int> drowStream(rowStream); thrust::device_ptr<int> dcolStream(colStream); thrust::device_vector<int64_t> rows_cols_packed(size_stream); thrust::transform( thrust::make_zip_iterator(thrust::make_tuple(drowStream, dcolStream)), thrust::make_zip_iterator(thrust::make_tuple(drowStream+size_stream, dcolStream+size_stream)), rows_cols_packed.begin(), pack()); //TODO: Decide whether to delete rowstream and colstream or whether to just reuse this memory in the unpacked rows and cols? //Maybe could consider deleting and recreating because sizeof(rowstream) == total #flops and size of final unpacked rowstream == total #nnzs in C. 
There could be a huge diff between the 2 //int64_t *keys_device = thrust::raw_pointer_cast(rows_cols_packed.data()); thrust::host_vector<int64_t> rows_cols_packed_host = rows_cols_packed; int64_t *keys_host = &rows_cols_packed_host[0]; MGPU_MEM(int64_t) mgpu_rows_cols_packed = context.Malloc(keys_host,size_stream); MGPU_MEM(QValue) values = context.Malloc(valueStream, size_stream); MGPU_MEM(int) segments = context.Malloc(segment_head_offsets, NumSegs); #if ENABLE_DEBUG printf("\n\nSEG-SORT PAIRS STARTING:\n\n"); cout<<"ROWS_COLS_PACKED (KEYS):\n"; for(int i=0;i<rows_cols_packed_host.size();i++){ cout<<keys_host[i]<<" "; } cout<<endl; cout<<"VALUES: \n"; PrintArray(*values, "%9f", 10); cout<<"total_num_segments"<<NumSegs<<endl; printf("Input keys:\n"); HANDLE_ERROR(cudaMemcpy(keys_host,mgpu_rows_cols_packed->get(),size_stream * sizeof(int64_t),cudaMemcpyDeviceToHost)); for(int i=0;i<size_stream;i++){ cout<<keys_host[i]<<" "; } cout<<endl; printf("\nSegment heads:\n"); //PrintArray(*segments, "%4d", 10); #endif // Sort within segments. SegSortPairsFromIndices(mgpu_rows_cols_packed->get(), values->get(), size_stream, segments->get(),NumSegs, context); #if ENABLE_DEBUG printf("\nSorted keys :\n"); HANDLE_ERROR(cudaMemcpy(keys_host,mgpu_rows_cols_packed->get(),size_stream * sizeof(int64_t),cudaMemcpyDeviceToHost)); for(int i=0;i<rows_cols_packed_host.size();i++){ cout<<keys_host[i]<<" "; } cout<<endl; printf("\nSorted values :\n"); PrintArray(*values, "%9f", 10); #endif MGPU_MEM(int64_t) mgpu_reduced_rows_cols_packed = context.Malloc<int64_t>(size_stream); MGPU_MEM(QValue) mgpu_reduced_vals = context.Malloc<QValue>(size_stream); //reduce on row_cols_packed //numSegments = number of unique r,c values int numSegments; ReduceByKey(mgpu_rows_cols_packed->get(), values->get(), size_stream, QValue(0.0), mgpu::plus<QValue>(), mgpu::equal_to<int64_t>(), mgpu_reduced_rows_cols_packed->get(), mgpu_reduced_vals->get(), &numSegments, (int*)0, context); #if ENABLE_DEBUG printf("\nReduced keys:\n"); //PrintArray(*keysDestDevice, numSegments, "%4f", 10); HANDLE_ERROR(cudaMemcpy(keys_host, mgpu_reduced_rows_cols_packed->get(), numSegments * sizeof(int64_t), cudaMemcpyDeviceToHost)); for(int i=0;i<numSegments;i++){ cout<<keys_host[i]<<" "; } cout<<endl; printf("\nReduced values:\n"); PrintArray(*mgpu_reduced_vals, numSegments, "%4f", 10); #endif //unpack keysDestDevice into rows and cols int64_t* d_reduced_rows_cols_packed = mgpu_reduced_rows_cols_packed->get(); thrust::device_ptr<int64_t> thrust_reduced_rows_cols_packed(d_reduced_rows_cols_packed); thrust::transform( thrust_reduced_rows_cols_packed, thrust_reduced_rows_cols_packed+numSegments, thrust::make_zip_iterator(thrust::make_tuple(drowStream, dcolStream)), unpack()); #if ENABLE_PRINT int *hrowStream = NULL; int *hcolStream = NULL; QValue* hvalStream = NULL; hrowStream = (int*) malloc(numSegments * sizeof(int)); hcolStream = (int*) malloc(numSegments * sizeof(int)); hvalStream = (QValue*) malloc(numSegments * sizeof(QValue)); HANDLE_ERROR(cudaMemcpy(hrowStream,rowStream,numSegments * sizeof(int),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(hcolStream,colStream,numSegments * sizeof(int),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(hvalStream,mgpu_reduced_vals->get(), numSegments * sizeof(QValue),cudaMemcpyDeviceToHost)); for(int i=0;i<(numSegments);i++) { printf("row : %d ,col : %d, val : %f\n",hrowStream[i],hcolStream[i],hvalStream[i]); } #endif //need to handle bin 7 } template <typename T> struct is_odd : public thrust::unary_function<T,bool> 
{ __host__ __device__ bool operator()(T x) { return x % 2; } }; CSR sgpuSpMMWrapper(const CSR &dA, const CSR &dB, int *drowIds, const vector<int> &hv,int *dflops) { CSR dC; int *rowStream,*colStream; QValue *valueStream; int m = dA.rows; int *flops = new int[1]; HANDLE_ERROR(cudaMemcpy(flops,dflops+m,4,cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMalloc((void**)&rowStream, *flops * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&colStream, *flops * sizeof(int))); HANDLE_ERROR(cudaMalloc((void**)&valueStream,*flops * sizeof(QValue))); //printf("total flops :%d\n",*flops); gpu_compute_stream(dA, dB, drowIds, hv, rowStream, colStream, valueStream,dflops); HANDLE_ERROR(cudaGetLastError()); // printing for checking correctness #if ENABLE_DEBUG int *hrowStream = NULL; int *hcolStream = NULL; QValue* hvalStream = NULL; hrowStream = (int*) malloc(*flops * sizeof(int)); hcolStream = (int*) malloc(*flops * sizeof(int)); hvalStream = (QValue*) malloc(*flops * sizeof(QValue)); HANDLE_ERROR(cudaMemcpy(hrowStream,rowStream,*flops * sizeof(int),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(hcolStream,colStream,*flops * sizeof(int),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(hvalStream,valueStream,*flops * sizeof(QValue),cudaMemcpyDeviceToHost)); for(int i=0;i<(*flops);i++) { printf("row : %d ,col : %d , val: %f\n",hrowStream[i],hcolStream[i],hvalStream[i]); } #endif int total_num_segments = ((*flops + FLOPS_SORT - 1)/FLOPS_SORT) - 1; // represents the size of the array printf("total number of segments : %d\n" , total_num_segments); //printf("total flops : %d\n" , (hv[7] - hv[2])); int *d_segment_heads; if(total_num_segments > 0) { HANDLE_ERROR(cudaMalloc((void**)&d_segment_heads, total_num_segments * sizeof(int))); const int BLOCK_THREADS = 256; const unsigned NBLOCKS = qmin(65535, (m + BLOCK_THREADS - 1) / BLOCK_THREADS); compute_sorting_pointers<BLOCK_THREADS><<<NBLOCKS,BLOCK_THREADS>>>(d_segment_heads, *flops, total_num_segments, rowStream); } /* printing for checking correctness */ //prints the segment heads int *h_segment_heads = NULL; h_segment_heads = (int*) malloc(total_num_segments * sizeof(int)); HANDLE_ERROR(cudaMemcpy(h_segment_heads, d_segment_heads, total_num_segments * sizeof(int), cudaMemcpyDeviceToHost)); int idx_of_last_minus1; for(idx_of_last_minus1=total_num_segments-1 ; idx_of_last_minus1>0 && (h_segment_heads[idx_of_last_minus1] == -1); idx_of_last_minus1--){ } cout<<"idx_of_last_minus1 = "<<idx_of_last_minus1<<endl; #if ENABLE_DEBUG for(int i=0;i<total_num_segments;i++) { printf("segment head %d :%d\n" ,i, h_segment_heads[i]); } #endif /* flops array - prefix sum of flops per row after rows are sorted in ascending order of flops Note that flops[0] will always be 0. Also, flops array stores all the rows that have 0 flops also. */ // d_segment_heads contains my segment heads /* hv array - contains info about starting indices of each bin in flops array. There are at most 7 bins in total and total size of hv is at most 9 (first 2 elements are dummy). Starting index of Bin #i (where i ranges from 1..7) is located in hv[i+1]. This value will point to the index in flops array containing the first non-zero value (flops is sorted so starting elements may be 0 - see above info about flops array for more details). hv[8] -> index where bin 7 starts in flops array */ // Pack (day, site) pairs into 64-bit integers. 
//thrust::device_ptr<int> dvalueStream(valueStream);
//sort_data(*flops,drowStream,dcolStream,dvalueStream);
//TODO: handle a segment-head array size of 0 and the trailing -1 sentinels; eventually this method should build and return the final CSR
ContextPtr context = CreateCudaDevice(0);
// the segment count is reduced because the tail of d_segment_heads holds -1 sentinels:
// total_num_segments = idx_of_last_minus1 + 1
SegSortPairs(*context, *flops, rowStream, colStream, valueStream, d_segment_heads, idx_of_last_minus1+1);
//get_segment_heads_for_CSR_reduction(const int64_t d_row_cols_sorted[], const QValue d_vals[], int segment_heads_for_CSR_reduction[], int size);
//Iterator values_end = thrust::remove_if(values.begin(), values.end(), is_odd<int>());
// since the values after values_end are garbage, we'll resize the vector
//values.resize(values_end - values.begin());
//uncomment later
//create_flags<<<NBLOCKS, NTHREADS>>>(d_cols_sorted, d_vals, d_prefix_sum_flags, d_flags, N/NBLOCKS);
/*
thrust::device_ptr<int> dIC = thrust::device_pointer_cast(dC.rowPtr);
thrust::exclusive_scan(dIC, dIC + m + 1, dIC);
int cNnz = dIC[m];
printf("total number of nnz %d", cNnz);
HANDLE_ERROR(cudaMalloc((void**)&dC.colInd, cNnz * sizeof(int)));
HANDLE_ERROR(cudaMalloc((void**)&dC.values, cNnz * sizeof(QValue)));
// performing the computation in matrix -- kernel.cu
//sgpu_SpGEMM(dA, dB, drowIds, hv, dC, cNnz, temp_C_128_id, temp_C_128_val, temp_C_256_id, temp_C_256_val, temp_C_512_id, temp_C_512_val, temp_C_1024_id, temp_C_1024_val);
cudaDeviceSynchronize();
dC.nnz = cNnz;
dC.rows = dA.rows;
dC.cols = dB.cols;
*/
return dC;
}
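/*
 * Hedged sketch: sgpuSpMMWrapper sorts and reduces on 64-bit keys built by
 * packing each (row, col) pair into a single int64_t, then recovers the row
 * and column streams through the unpack() functor passed to thrust::transform
 * above.  The actual functors live elsewhere in this project; the names
 * pack_rc/unpack_rc and the high/low 32-bit split below are assumptions,
 * not the original definitions.
 */
#include <cstdint>
#include <thrust/tuple.h>

struct pack_rc {
  __host__ __device__ int64_t operator()(int row, int col) const {
    // row in the high 32 bits, col in the low 32 bits, so sorting the
    // packed keys orders entries by row first, then by column
    return (static_cast<int64_t>(row) << 32) | static_cast<uint32_t>(col);
  }
};

struct unpack_rc {
  __host__ __device__ thrust::tuple<int, int> operator()(int64_t key) const {
    int row = static_cast<int>(key >> 32);
    int col = static_cast<int>(key & 0xffffffffLL);
    return thrust::make_tuple(row, col);  // written through a zip_iterator above
  }
};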
8b086feffd83cab0a4757a7ec6ab5bd5de6eefdd.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "init_vectors_reserved.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    hipSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *vec = NULL;
            hipMalloc(&vec, XSIZE * YSIZE * sizeof(int));  // allocate XSIZE*YSIZE ints, not bytes
            const int vec_length = 1;
            // round the problem size up to the next multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            hipFree(0);
            hipLaunchKernelGGL(( init_vectors_reserved), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_length);
            hipDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up launches
                hipLaunchKernelGGL(( init_vectors_reserved), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_length);
            }
            hipDeviceSynchronize();  // drain the warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                hipLaunchKernelGGL(( init_vectors_reserved), dim3(gridBlock), dim3(threadBlock), 0, 0, vec, vec_length);
            }
            hipDeviceSynchronize();  // wait for the timed kernels to finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            hipFree(vec);  // release the per-configuration allocation
        }
    }
}
8b086feffd83cab0a4757a7ec6ab5bd5de6eefdd.cu
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "init_vectors_reserved.cu"
#include <chrono>
#include <iostream>
using namespace std;
using namespace std::chrono;

int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};

int main(int argc, char **argv) {
    cudaSetDevice(0);
    char *p;
    int matrix_len = strtol(argv[1], &p, 10);
    for (int matrix_looper = 0; matrix_looper < matrix_len; matrix_looper++) {
        for (int block_looper = 0; block_looper < 20; block_looper++) {
            int XSIZE = matrices_[matrix_looper][0], YSIZE = matrices_[matrix_looper][1];
            int BLOCKX = blocks_[block_looper][0], BLOCKY = blocks_[block_looper][1];
            int *vec = NULL;
            cudaMalloc(&vec, XSIZE * YSIZE * sizeof(int));  // allocate XSIZE*YSIZE ints, not bytes
            const int vec_length = 1;
            // round the problem size up to the next multiple of the block shape
            int iXSIZE = XSIZE;
            int iYSIZE = YSIZE;
            while (iXSIZE % BLOCKX != 0) { iXSIZE++; }
            while (iYSIZE % BLOCKY != 0) { iYSIZE++; }
            dim3 gridBlock(iXSIZE / BLOCKX, iYSIZE / BLOCKY);
            dim3 threadBlock(BLOCKX, BLOCKY);
            cudaFree(0);
            init_vectors_reserved<<<gridBlock, threadBlock>>>(vec, vec_length);
            cudaDeviceSynchronize();
            for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {  // warm-up launches
                init_vectors_reserved<<<gridBlock, threadBlock>>>(vec, vec_length);
            }
            cudaDeviceSynchronize();  // drain the warm-up work before starting the clock
            auto start = steady_clock::now();
            for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
                init_vectors_reserved<<<gridBlock, threadBlock>>>(vec, vec_length);
            }
            cudaDeviceSynchronize();  // wait for the timed kernels to finish before reading the clock
            auto end = steady_clock::now();
            auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
            cout << '[' << usecs.count() << ',' << '(' << BLOCKX << ',' << BLOCKY << ')' << ',' << '(' << XSIZE << ',' << YSIZE << ')' << ']' << endl;
            cudaFree(vec);  // release the per-configuration allocation
        }
    }
}
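/*
 * Hedged alternative to the std::chrono timing in the harness above: CUDA
 * events record timestamps on the stream itself, so the measured interval
 * covers kernel execution rather than only launch overhead.  Sketch only;
 * it reuses the init_vectors_reserved kernel and the grid/block shapes the
 * harness already computes, and the helper name is ours.
 */
static float time_kernel_ms(int *vec, int vec_length, dim3 grid, dim3 block, int iters) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);                    // enqueue the start timestamp
  for (int i = 0; i < iters; i++) {
    init_vectors_reserved<<<grid, block>>>(vec, vec_length);
  }
  cudaEventRecord(stop);                     // enqueue the stop timestamp
  cudaEventSynchronize(stop);                // block until the stop event fires
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);    // elapsed time in milliseconds
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}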
a42b04a74746e31a624b976152097e9455ef0037.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test, int epoch, float eps_scale) : DataWorker(convNet, data), _test(test), _epoch(epoch), _eps_scale(eps_scale) { } //debug int minibatch=0; int gepoch = 0; // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. 
void TrainingWorker::run() { _dp->setData(*_data); _convNet->setEpoch(_epoch); //debug gepoch = _epoch; size_t free_mem; size_t total_mem; hipError_t err = hipMemGetInfo(&free_mem, &total_mem); printf(" free memory %f \n", free_mem/1e6); // auxPass(); Cost& batchCost = *new Cost(0); vector<int> wrongRes; trainingPass(batchCost, wrongRes); //if(!_test) //{ // float first_err = wrongRes.size()*1./_convNet->getNumCases(); // for (int k = 0; k < 6; k++) // { // if(wrongRes.size()== 0) // break; // float err = hardPass(wrongRes, k); // if(err < .5*first_err) // break; // } //} hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } void TrainingWorker::auxPass() { //choose subset //int subset_size = .3*_dp->getNumMinibatches(); //vector<int> subset; //vector<int> mask; //for (int i = 0; i < _dp->getNumMinibatches(); i++) // mask.push_back(0); //for (int i=0; i< subset_size; i++) //{ // int r = rand()%(_dp->getNumMinibatches()-i); // int count = 0; // for (int j = 0; j < _dp->getNumMinibatches(); j++) // { // if(mask[j] == 0) // { // if(count == r) // { // mask[j] = 1; // subset.push_back(j); // break; // } // count++; // } // } //} //assert(subset.size() == subset_size); // //_convNet->zeroAuxWeights(); //int rndGradInd = rand()%subset_size; //for (int ki = 0; ki < subset_size; ki++) { // int mb_ind=subset[ki]; // float scale = 1./subset_size; // if(ki == rndGradInd) // scale = 1./subset_size - 1; // _convNet->fpropRnd(mb_ind, _epoch, PASS_AUX); // _convNet->bprop(PASS_AUX); // _convNet->procAuxWeights(scale); // } } void TrainingWorker::trainingPass(Cost& batchCost, vector<int>& wrongRes) { bool useAux = true; int err_size = 128; static int error_upd = 0; static vector<float> test_error; static float prev_err = 2; int check_num = 0; int failure_num = 0; //int epoch_switch = 90; //for (int ki = 0; ki < 1; ki++) { for (int ki = 0; ki < _dp->getNumMinibatches(); ki++) { // int mini_ind = shaffle[ki]; //debug minibatch=ki; //printf("minibatch %i \n", ki); //useAux=(ki%2==0); _convNet->setParam(_eps_scale, 1); //_convNet->fprop(mini_ind, _test ? PASS_TEST : PASS_TRAIN); vector<int> mini2pos; _convNet->fpropRnd(ki, _epoch, _test ? 
PASS_TEST : PASS_TRAIN, mini2pos); _convNet->getCost(batchCost); for(int indMini = 0; indMini < _convNet->getNumCases(); indMini++) { int res = _convNet->getCorrRes(indMini); if(res == 0) { wrongRes.push_back(mini2pos[indMini]); } }; bool successs = true; float err = _convNet->getErrorNum()/ _convNet->getNumCases(); float avg_neg_delta =0; float avg_delta = 0; if(test_error.size() >= err_size) { for(int i = 0; i<test_error.size() ; i++) { avg_delta += test_error[i]; if(test_error[i] < 0) { avg_neg_delta += fabs(test_error[i]); } } avg_delta *= 1./test_error.size(); avg_neg_delta *= 1./test_error.size(); } if(prev_err <= 1) { if(test_error.size() < err_size) test_error.push_back(prev_err - err); else test_error[error_upd] = prev_err - err; } float scale_rollback_stage0 = 0; if( err-prev_err > 0) { successs = false; scale_rollback_stage0 = .2 + .5*fmax(1-(err-prev_err)/avg_neg_delta,0); failure_num++; } error_upd = (error_upd+1)%err_size; prev_err = err; if (!_test) { _convNet->bprop(PASS_TRAIN); if(!successs) _convNet->rollbackWeights(scale_rollback_stage0); _convNet->updateWeights(useAux); //if(useAux) // _convNet->procAuxWeights(); } //debug aux //if(ki > 50) // exit(-1); } printf("***failures %f \n", 1.*failure_num/ _dp->getNumMinibatches()); } //wrong result pass float TrainingWorker::hardPass( vector<int>& wrongRes, int prime_off) { bool useAux = true; vector<int> wrongOut; int numMinibatches = DIVUP(wrongRes.size(), _dp->getMinibatchSize()); int fill_size = wrongRes.size()%_dp->getMinibatchSize(); int from = _convNet->getNumCases(); //printf("***hard pass %i wrongRes %f numMinibatches %i\n", prime_off, wrongRes.size()*1./_dp->getNumCases(), numMinibatches); for (int ki = 0; ki < numMinibatches; ki++) { //debug minibatch=ki; _convNet->setParam(_eps_scale, 1); vector<int> mini2pos; _convNet->fpropHard(ki, _epoch+1+prime_off, numMinibatches, _test ? 
PASS_TEST : PASS_TRAIN, wrongRes, mini2pos); for(int indMini = 0; indMini < mini2pos.size(); indMini++) { int res = _convNet->getCorrRes(indMini); if(res == 0) { wrongOut.push_back(mini2pos[indMini]); } }; _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(useAux); } printf("*** hard pass %i res %f \n", prime_off, wrongOut.size()*1./_dp->getNumCases()); return wrongOut.size()*1./_dp->getNumCases(); //wrongRes = wrongOut; } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } hipDeviceSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
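/*
 * Worked example of the rollback heuristic in trainingPass above.  The
 * worker keeps a 128-entry ring buffer of per-minibatch error deltas; when
 * the error rises, it rolls weights back with scale
 *   0.2 + 0.5 * max(1 - (err - prev_err) / avg_neg_delta, 0).
 * E.g. with avg_neg_delta = 0.02 and an increase err - prev_err = 0.01:
 *   0.2 + 0.5 * max(1 - 0.5, 0) = 0.45,
 * so the scale passed to rollbackWeights ranges from 0.7 for vanishingly
 * small regressions down to a floor of 0.2 for regressions at or beyond
 * the typical negative delta.  Standalone helper (ours, not part of the
 * original worker):
 */
static float rollbackScale(float errDelta, float avgNegDelta) {
  return 0.2f + 0.5f * fmaxf(1.0f - errDelta / avgNegDelta, 0.0f);
}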
a42b04a74746e31a624b976152097e9455ef0037.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <algorithm> #include <util.cuh> #include <worker.cuh> using namespace std; /* * ==================== * WorkResult * ==================== */ WorkResult::WorkResult(WorkResult::RESULTS resultType, Cost& results) : _resultType(resultType), _results(&results) { } WorkResult::WorkResult(WorkResult::RESULTS resultType) : _resultType(resultType), _results(NULL) { } WorkResult::~WorkResult() { delete _results; // delete NULL is ok } Cost& WorkResult::getResults() const { return *_results; } WorkResult::RESULTS WorkResult::getResultType() const { return _resultType; } /* * ==================== * Worker * ==================== */ Worker::Worker(ConvNet& convNet) : _convNet(&convNet) { } /* * ==================== * DataWorker * ==================== */ DataWorker::DataWorker(ConvNet& convNet, CPUData& data) : Worker(convNet), _data(&data) { _dp = &convNet.getDataProvider(); } DataWorker::~DataWorker() { _dp->clearData(); } /* * ==================== * TrainingWorker * ==================== */ TrainingWorker::TrainingWorker(ConvNet& convNet, CPUData& data, bool test, int epoch, float eps_scale) : DataWorker(convNet, data), _test(test), _epoch(epoch), _eps_scale(eps_scale) { } //debug int minibatch=0; int gepoch = 0; // Need to setData here (as opposed to the constructor) because the constructor executes in // the original CPU thread, which is not the one with GPU access. 
void TrainingWorker::run() { _dp->setData(*_data); _convNet->setEpoch(_epoch); //debug gepoch = _epoch; size_t free_mem; size_t total_mem; cudaError_t err = cudaMemGetInfo(&free_mem, &total_mem); printf(" free memory %f \n", free_mem/1e6); // auxPass(); Cost& batchCost = *new Cost(0); vector<int> wrongRes; trainingPass(batchCost, wrongRes); //if(!_test) //{ // float first_err = wrongRes.size()*1./_convNet->getNumCases(); // for (int k = 0; k < 6; k++) // { // if(wrongRes.size()== 0) // break; // float err = hardPass(wrongRes, k); // if(err < .5*first_err) // break; // } //} cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } void TrainingWorker::auxPass() { //choose subset //int subset_size = .3*_dp->getNumMinibatches(); //vector<int> subset; //vector<int> mask; //for (int i = 0; i < _dp->getNumMinibatches(); i++) // mask.push_back(0); //for (int i=0; i< subset_size; i++) //{ // int r = rand()%(_dp->getNumMinibatches()-i); // int count = 0; // for (int j = 0; j < _dp->getNumMinibatches(); j++) // { // if(mask[j] == 0) // { // if(count == r) // { // mask[j] = 1; // subset.push_back(j); // break; // } // count++; // } // } //} //assert(subset.size() == subset_size); // //_convNet->zeroAuxWeights(); //int rndGradInd = rand()%subset_size; //for (int ki = 0; ki < subset_size; ki++) { // int mb_ind=subset[ki]; // float scale = 1./subset_size; // if(ki == rndGradInd) // scale = 1./subset_size - 1; // _convNet->fpropRnd(mb_ind, _epoch, PASS_AUX); // _convNet->bprop(PASS_AUX); // _convNet->procAuxWeights(scale); // } } void TrainingWorker::trainingPass(Cost& batchCost, vector<int>& wrongRes) { bool useAux = true; int err_size = 128; static int error_upd = 0; static vector<float> test_error; static float prev_err = 2; int check_num = 0; int failure_num = 0; //int epoch_switch = 90; //for (int ki = 0; ki < 1; ki++) { for (int ki = 0; ki < _dp->getNumMinibatches(); ki++) { // int mini_ind = shaffle[ki]; //debug minibatch=ki; //printf("minibatch %i \n", ki); //useAux=(ki%2==0); _convNet->setParam(_eps_scale, 1); //_convNet->fprop(mini_ind, _test ? PASS_TEST : PASS_TRAIN); vector<int> mini2pos; _convNet->fpropRnd(ki, _epoch, _test ? 
PASS_TEST : PASS_TRAIN, mini2pos); _convNet->getCost(batchCost); for(int indMini = 0; indMini < _convNet->getNumCases(); indMini++) { int res = _convNet->getCorrRes(indMini); if(res == 0) { wrongRes.push_back(mini2pos[indMini]); } }; bool successs = true; float err = _convNet->getErrorNum()/ _convNet->getNumCases(); float avg_neg_delta =0; float avg_delta = 0; if(test_error.size() >= err_size) { for(int i = 0; i<test_error.size() ; i++) { avg_delta += test_error[i]; if(test_error[i] < 0) { avg_neg_delta += fabs(test_error[i]); } } avg_delta *= 1./test_error.size(); avg_neg_delta *= 1./test_error.size(); } if(prev_err <= 1) { if(test_error.size() < err_size) test_error.push_back(prev_err - err); else test_error[error_upd] = prev_err - err; } float scale_rollback_stage0 = 0; if( err-prev_err > 0) { successs = false; scale_rollback_stage0 = .2 + .5*fmax(1-(err-prev_err)/avg_neg_delta,0); failure_num++; } error_upd = (error_upd+1)%err_size; prev_err = err; if (!_test) { _convNet->bprop(PASS_TRAIN); if(!successs) _convNet->rollbackWeights(scale_rollback_stage0); _convNet->updateWeights(useAux); //if(useAux) // _convNet->procAuxWeights(); } //debug aux //if(ki > 50) // exit(-1); } printf("***failures %f \n", 1.*failure_num/ _dp->getNumMinibatches()); } //wrong result pass float TrainingWorker::hardPass( vector<int>& wrongRes, int prime_off) { bool useAux = true; vector<int> wrongOut; int numMinibatches = DIVUP(wrongRes.size(), _dp->getMinibatchSize()); int fill_size = wrongRes.size()%_dp->getMinibatchSize(); int from = _convNet->getNumCases(); //printf("***hard pass %i wrongRes %f numMinibatches %i\n", prime_off, wrongRes.size()*1./_dp->getNumCases(), numMinibatches); for (int ki = 0; ki < numMinibatches; ki++) { //debug minibatch=ki; _convNet->setParam(_eps_scale, 1); vector<int> mini2pos; _convNet->fpropHard(ki, _epoch+1+prime_off, numMinibatches, _test ? 
PASS_TEST : PASS_TRAIN, wrongRes, mini2pos); for(int indMini = 0; indMini < mini2pos.size(); indMini++) { int res = _convNet->getCorrRes(indMini); if(res == 0) { wrongOut.push_back(mini2pos[indMini]); } }; _convNet->bprop(PASS_TRAIN); _convNet->updateWeights(useAux); } printf("*** hard pass %i res %f \n", prime_off, wrongOut.size()*1./_dp->getNumCases()); return wrongOut.size()*1./_dp->getNumCases(); //wrongRes = wrongOut; } /* * ==================== * SyncWorker * ==================== */ SyncWorker::SyncWorker(ConvNet& convNet) : Worker(convNet) { } void SyncWorker::run() { _convNet->copyToCPU(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::SYNC_DONE)); } /* * ==================== * GradCheckWorker * ==================== */ GradCheckWorker::GradCheckWorker(ConvNet& convNet, CPUData& data) : DataWorker(convNet, data) { } void GradCheckWorker::run() { _dp->setData(*_data); _convNet->checkGradients(); exit(0); } /* * ==================== * MultiviewTestWorker * ==================== */ MultiviewTestWorker::MultiviewTestWorker(ConvNet& convNet, CPUData& data, int numViews, int logregIdx) : DataWorker(convNet, data), _numViews(numViews), _logregIdx(logregIdx) { assert(_data->getNumCases() % _numViews == 0); } void MultiviewTestWorker::run() { _dp->setData(*_data); Layer& logregLayer = _convNet->getLayer(_logregIdx); int numCasesReal = _dp->getNumCases() / _numViews; int numMiniReal = DIVUP(numCasesReal, _dp->getMinibatchSize()); Cost& batchCost = *new Cost(0); for (int i = 0; i < numMiniReal; i++) { NVMatrix softmaxActs; for (int v = 0; v < _numViews; v++) { GPUData& mini = _dp->getDataSlice(v * numCasesReal + i * _dp->getMinibatchSize(), min((v + 1) * numCasesReal, v * numCasesReal + (i + 1) * _dp->getMinibatchSize())); _convNet->fprop(mini, PASS_TEST); if (v == 0) { logregLayer.getPrev()[1]->getActs().copy(softmaxActs); } else { softmaxActs.add(logregLayer.getPrev()[1]->getActs()); } } softmaxActs.scale(1.0 / _numViews); NVMatrixV logregInput; logregInput.push_back(&logregLayer.getPrev()[0]->getActs()); logregInput.push_back(&softmaxActs); logregLayer.fprop(logregInput, PASS_TEST); _convNet->getCost(batchCost); } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); } /* * ==================== * FeatureWorker * ==================== */ FeatureWorker::FeatureWorker(ConvNet& convNet, CPUData& data, Matrix& ftrs, int layerIdx) : DataWorker(convNet, data), _ftrs(&ftrs), _layerIdx(layerIdx) { assert(ftrs.getNumRows() == data.getNumCases()); assert(!ftrs.isTrans()); } FeatureWorker::~FeatureWorker() { delete _ftrs; } void FeatureWorker::run() { _dp->setData(*_data); Layer& ftrLayer = _convNet->getLayer(_layerIdx); Cost& batchCost = *new Cost(0); for (int i = 0; i < _dp->getNumMinibatches(); i++) { _convNet->fprop(i, PASS_TEST); _convNet->getCost(batchCost); Matrix& miniFtrs = _ftrs->sliceRows(i * _dp->getMinibatchSize(), min(_dp->getNumCases(), (i + 1) * _dp->getMinibatchSize())); NVMatrix& acts = ftrLayer.getActs(); NVMatrix acts_T; if (acts.isTrans()) { NVMatrix& soft_T = acts.getTranspose(); soft_T.transpose(acts_T); delete &soft_T; } else { acts.transpose(acts_T); } acts_T.copyToHost(miniFtrs); delete &miniFtrs; } cudaThreadSynchronize(); _convNet->getResultQueue().enqueue(new WorkResult(WorkResult::BATCH_DONE, batchCost)); }
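/*
 * Note on the pair above: the original .cu calls cudaThreadSynchronize(),
 * which CUDA has deprecated in favor of cudaDeviceSynchronize(); hipify
 * accordingly emits hipDeviceSynchronize() in the .hip twin.  A minimal
 * checked replacement (helper name and message text are ours):
 */
#include <cstdio>
#include <cstdlib>

static void syncAndCheck(const char *where) {
  cudaError_t err = cudaDeviceSynchronize();  // wait for all queued GPU work
  if (err != cudaSuccess) {
    fprintf(stderr, "CUDA error after %s: %s\n", where, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
  }
}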
864c8121b9e93fa559d865746feb070b498d931f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <sys/time.h> #include <cstdio> #include "jacobi.h" #include "error_checks.h" // Change this to 0 if CPU reference result is not needed #define COMPUTE_CPU_REFERENCE 1 #define MAX_ITERATIONS 3000 // CPU kernel void sweepCPU(double* phi, const double *phiPrev, const double *source, double h2, int N) { int i, j; int index, i1, i2, i3, i4; for (j = 1; j < N-1; j++) { for (i = 1; i < N-1; i++) { index = i + j*N; i1 = (i-1) + j * N; i2 = (i+1) + j * N; i3 = i + (j-1) * N; i4 = i + (j+1) * N; phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] + phiPrev[i3] + phiPrev[i4] - h2 * source[index]); } } } // GPU kernel __global__ void sweepGPU(double *phi, const double *phiPrev, const double *source, double h2, int N) { #error Add here the GPU version of the update routine (see sweepCPU above) } double compareArrays(const double *a, const double *b, int N) { double error = 0.0; int i; for (i = 0; i < N*N; i++) { error += fabs(a[i] - b[i]); } return error/(N*N); } double diffCPU(const double *phi, const double *phiPrev, int N) { int i; double sum = 0; double diffsum = 0; for (i = 0; i < N*N; i++) { diffsum += (phi[i] - phiPrev[i]) * (phi[i] - phiPrev[i]); sum += phi[i] * phi[i]; } return sqrt(diffsum/sum); } int main() { timeval t1, t2; // Structs for timing const int N = 512; double h = 1.0 / (N - 1); int iterations; const double tolerance = 5e-4; // Stopping condition int i, j, index; const int blocksize = 16; double *phi = new double[N*N]; double *phiPrev = new double[N*N]; double *source = new double[N*N]; double *phi_cuda = new double[N*N]; double *phi_d, *phiPrev_d, *source_d; // Size of the arrays in bytes const int size = N*N*sizeof(double); double diff; // Source initialization for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { double x, y; x = (i - N / 2) * h; y = (j - N / 2) * h; index = j + i * N; if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1) source[index] = 1e10*h*h; else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1) source[index] = -1e10*h*h; else source[index] = 0.0; } } CUDA_CHECK( hipMalloc( (void**)&source_d, size) ); CUDA_CHECK( hipMemcpy(source_d, source, size, hipMemcpyHostToDevice) ); // Reset values to zero for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { index = j + i * N; phi[index] = 0.0; phiPrev[index] = 0.0; } } CUDA_CHECK( hipMalloc( (void**)&phi_d, size) ); CUDA_CHECK( hipMalloc( (void**)&phiPrev_d, size) ); CUDA_CHECK( hipMemcpy(phi_d, phi, size, hipMemcpyHostToDevice) ); CUDA_CHECK( hipMemcpy(phiPrev_d, phiPrev, size, hipMemcpyHostToDevice) ); // CPU version if(COMPUTE_CPU_REFERENCE) { gettimeofday(&t1, NULL); // Do sweeps untill difference is under the tolerance diff = tolerance * 2; iterations = 0; while (diff > tolerance && iterations < MAX_ITERATIONS) { sweepCPU(phiPrev, phi, source, h * h, N); sweepCPU(phi, phiPrev, source, h * h, N); iterations += 2; if (iterations % 100 == 0) { diff = diffCPU(phi, phiPrev, N); printf("%d %g\n", iterations, diff); } } gettimeofday(&t2, NULL); printf("CPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); } // GPU version dim3 dimBlock(blocksize, blocksize); dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize); //do sweeps until diff under tolerance diff = tolerance * 2; iterations = 0; gettimeofday(&t1, NULL); while (diff > tolerance && iterations < MAX_ITERATIONS) { // See above how the CPU update kernel is called // and implement similar 
calling sequence for the GPU code //// Add routines here #error Add GPU kernel calls here (see CPU version above) iterations += 2; if (iterations % 100 == 0) { // diffGPU is defined in the header file, it uses // Thrust library for reduction computation diff = diffGPU<double>(phiPrev_d, phi_d, N); CHECK_ERROR_MSG("Difference computation"); printf("%d %g\n", iterations, diff); } } //// Add here the routine to copy back the results #error Copy back the results gettimeofday(&t2, NULL); printf("GPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); //// Add here the clean up code for all allocated CUDA resources #error Add here the clean up code if (COMPUTE_CPU_REFERENCE) { printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N)); } delete[] phi; delete[] phi_cuda; delete[] phiPrev; delete[] source; return EXIT_SUCCESS; }
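/*
 * The #error directives above are the exercise's intentional placeholders,
 * left as-is.  One possible sketch of the missing update kernel (not the
 * course's official solution); __global__ kernel syntax is identical in
 * HIP and CUDA, and the body mirrors sweepCPU with one thread per interior
 * grid point:
 */
__global__ void sweepGPUSketch(double *phi, const double *phiPrev,
                               const double *source, double h2, int N) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int j = blockIdx.y * blockDim.y + threadIdx.y;
  if (i > 0 && i < N - 1 && j > 0 && j < N - 1) {
    int index = i + j * N;
    phi[index] = 0.25 * (phiPrev[index - 1] + phiPrev[index + 1] +
                         phiPrev[index - N] + phiPrev[index + N] -
                         h2 * source[index]);
  }
}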
864c8121b9e93fa559d865746feb070b498d931f.cu
#include <sys/time.h> #include <cstdio> #include "jacobi.h" #include "error_checks.h" // Change this to 0 if CPU reference result is not needed #define COMPUTE_CPU_REFERENCE 1 #define MAX_ITERATIONS 3000 // CPU kernel void sweepCPU(double* phi, const double *phiPrev, const double *source, double h2, int N) { int i, j; int index, i1, i2, i3, i4; for (j = 1; j < N-1; j++) { for (i = 1; i < N-1; i++) { index = i + j*N; i1 = (i-1) + j * N; i2 = (i+1) + j * N; i3 = i + (j-1) * N; i4 = i + (j+1) * N; phi[index] = 0.25 * (phiPrev[i1] + phiPrev[i2] + phiPrev[i3] + phiPrev[i4] - h2 * source[index]); } } } // GPU kernel __global__ void sweepGPU(double *phi, const double *phiPrev, const double *source, double h2, int N) { #error Add here the GPU version of the update routine (see sweepCPU above) } double compareArrays(const double *a, const double *b, int N) { double error = 0.0; int i; for (i = 0; i < N*N; i++) { error += fabs(a[i] - b[i]); } return error/(N*N); } double diffCPU(const double *phi, const double *phiPrev, int N) { int i; double sum = 0; double diffsum = 0; for (i = 0; i < N*N; i++) { diffsum += (phi[i] - phiPrev[i]) * (phi[i] - phiPrev[i]); sum += phi[i] * phi[i]; } return sqrt(diffsum/sum); } int main() { timeval t1, t2; // Structs for timing const int N = 512; double h = 1.0 / (N - 1); int iterations; const double tolerance = 5e-4; // Stopping condition int i, j, index; const int blocksize = 16; double *phi = new double[N*N]; double *phiPrev = new double[N*N]; double *source = new double[N*N]; double *phi_cuda = new double[N*N]; double *phi_d, *phiPrev_d, *source_d; // Size of the arrays in bytes const int size = N*N*sizeof(double); double diff; // Source initialization for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { double x, y; x = (i - N / 2) * h; y = (j - N / 2) * h; index = j + i * N; if (((x - 0.25) * (x - 0.25) + y * y) < 0.1 * 0.1) source[index] = 1e10*h*h; else if (((x + 0.25) * (x + 0.25) + y * y) < 0.1 * 0.1) source[index] = -1e10*h*h; else source[index] = 0.0; } } CUDA_CHECK( cudaMalloc( (void**)&source_d, size) ); CUDA_CHECK( cudaMemcpy(source_d, source, size, cudaMemcpyHostToDevice) ); // Reset values to zero for (i = 0; i < N; i++) { for (j = 0; j < N; j++) { index = j + i * N; phi[index] = 0.0; phiPrev[index] = 0.0; } } CUDA_CHECK( cudaMalloc( (void**)&phi_d, size) ); CUDA_CHECK( cudaMalloc( (void**)&phiPrev_d, size) ); CUDA_CHECK( cudaMemcpy(phi_d, phi, size, cudaMemcpyHostToDevice) ); CUDA_CHECK( cudaMemcpy(phiPrev_d, phiPrev, size, cudaMemcpyHostToDevice) ); // CPU version if(COMPUTE_CPU_REFERENCE) { gettimeofday(&t1, NULL); // Do sweeps untill difference is under the tolerance diff = tolerance * 2; iterations = 0; while (diff > tolerance && iterations < MAX_ITERATIONS) { sweepCPU(phiPrev, phi, source, h * h, N); sweepCPU(phi, phiPrev, source, h * h, N); iterations += 2; if (iterations % 100 == 0) { diff = diffCPU(phi, phiPrev, N); printf("%d %g\n", iterations, diff); } } gettimeofday(&t2, NULL); printf("CPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); } // GPU version dim3 dimBlock(blocksize, blocksize); dim3 dimGrid((N + blocksize - 1) / blocksize, (N + blocksize - 1) / blocksize); //do sweeps until diff under tolerance diff = tolerance * 2; iterations = 0; gettimeofday(&t1, NULL); while (diff > tolerance && iterations < MAX_ITERATIONS) { // See above how the CPU update kernel is called // and implement similar calling sequence for the GPU code //// Add routines here #error Add GPU kernel calls 
here (see CPU version above) iterations += 2; if (iterations % 100 == 0) { // diffGPU is defined in the header file, it uses // Thrust library for reduction computation diff = diffGPU<double>(phiPrev_d, phi_d, N); CHECK_ERROR_MSG("Difference computation"); printf("%d %g\n", iterations, diff); } } //// Add here the routine to copy back the results #error Copy back the results gettimeofday(&t2, NULL); printf("GPU Jacobi: %g seconds, %d iterations\n", t2.tv_sec - t1.tv_sec + (t2.tv_usec - t1.tv_usec) / 1.0e6, iterations); //// Add here the clean up code for all allocated CUDA resources #error Add here the clean up code if (COMPUTE_CPU_REFERENCE) { printf("Average difference is %g\n", compareArrays(phi, phi_cuda, N)); } delete[] phi; delete[] phi_cuda; delete[] phiPrev; delete[] source; return EXIT_SUCCESS; }
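/*
 * Hedged sketch of the remaining #error sites in main() above (launch
 * sequence, copy-back, cleanup), assuming sweepGPU has been implemented
 * as in the kernel sketch earlier.  Helper names are ours; the .hip twin
 * would use hipMemcpy/hipFree.  Not the official solution.
 */
static void jacobiGPUSweepPair(double *phi_d, double *phiPrev_d, const double *source_d,
                               double h2, int N, dim3 dimGrid, dim3 dimBlock) {
  // one double sweep per loop iteration, mirroring the CPU version
  sweepGPU<<<dimGrid, dimBlock>>>(phiPrev_d, phi_d, source_d, h2, N);
  sweepGPU<<<dimGrid, dimBlock>>>(phi_d, phiPrev_d, source_d, h2, N);
}

static void jacobiGPUFinish(double *phi_cuda, double *phi_d, double *phiPrev_d,
                            double *source_d, size_t size) {
  // copy the result back for the CPU/GPU comparison, then free device memory
  CUDA_CHECK( cudaMemcpy(phi_cuda, phi_d, size, cudaMemcpyDeviceToHost) );
  CUDA_CHECK( cudaFree(phi_d) );
  CUDA_CHECK( cudaFree(phiPrev_d) );
  CUDA_CHECK( cudaFree(source_d) );
}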
932d9aaa8d3c3b5ba3d6df9487455d78267963c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void KeRelu2(const T* x, const int num, T* y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = gid; i < num; i += blockDim.x * gridDim.x) { y[i] = max(x[i], static_cast<T>(0.)); } } template <typename DeviceContext, typename T> class Relu2CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_t = ctx.Input<Tensor>("X"); auto* out_t = ctx.Output<Tensor>("Y"); auto x = in_t->data<T>(); auto y = out_t->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<DeviceContext>(); int num = in_t->numel(); int block = 512; int grid = (num + block - 1) / block; hipLaunchKernelGGL(( KeRelu2<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), x, num, y); } }; template <typename T> __global__ void KeRelu2Grad(const T* y, const T* dy, const int num, T* dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = gid; i < num; i += blockDim.x * gridDim.x) { dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.); } } template <typename DeviceContext, typename T> class Relu2GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy_t = ctx.Input<Tensor>(framework::GradVarName("Y")); auto* y_t = ctx.Input<Tensor>("Y"); auto* dx_t = ctx.Output<Tensor>(framework::GradVarName("X")); auto dy = dy_t->data<T>(); auto y = y_t->data<T>(); auto dx = dx_t->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<DeviceContext>(); int num = dy_t->numel(); int block = 512; int grid = (num + block - 1) / block; hipLaunchKernelGGL(( KeRelu2Grad<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), y, dy, num, dx); } }; } // namespace operators } // namespace paddle using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(relu2, paddle::operators::Relu2CUDAKernel<CUDA, float>, paddle::operators::Relu2CUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL(relu2_grad, paddle::operators::Relu2GradCUDAKernel<CUDA, float>, paddle::operators::Relu2GradCUDAKernel<CUDA, double>);
932d9aaa8d3c3b5ba3d6df9487455d78267963c4.cu
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/fluid/framework/op_registry.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; template <typename T> __global__ void KeRelu2(const T* x, const int num, T* y) { int gid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = gid; i < num; i += blockDim.x * gridDim.x) { y[i] = max(x[i], static_cast<T>(0.)); } } template <typename DeviceContext, typename T> class Relu2CUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* in_t = ctx.Input<Tensor>("X"); auto* out_t = ctx.Output<Tensor>("Y"); auto x = in_t->data<T>(); auto y = out_t->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<DeviceContext>(); int num = in_t->numel(); int block = 512; int grid = (num + block - 1) / block; KeRelu2<T><<<grid, block, 0, dev_ctx.stream()>>>(x, num, y); } }; template <typename T> __global__ void KeRelu2Grad(const T* y, const T* dy, const int num, T* dx) { int gid = blockIdx.x * blockDim.x + threadIdx.x; for (int i = gid; i < num; i += blockDim.x * gridDim.x) { dx[i] = dy[i] * (y[i] > 0 ? 1. : 0.); } } template <typename DeviceContext, typename T> class Relu2GradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* dy_t = ctx.Input<Tensor>(framework::GradVarName("Y")); auto* y_t = ctx.Input<Tensor>("Y"); auto* dx_t = ctx.Output<Tensor>(framework::GradVarName("X")); auto dy = dy_t->data<T>(); auto y = y_t->data<T>(); auto dx = dx_t->mutable_data<T>(ctx.GetPlace()); auto& dev_ctx = ctx.template device_context<DeviceContext>(); int num = dy_t->numel(); int block = 512; int grid = (num + block - 1) / block; KeRelu2Grad<T><<<grid, block, 0, dev_ctx.stream()>>>(y, dy, num, dx); } }; } // namespace operators } // namespace paddle using CUDA = paddle::platform::CUDADeviceContext; REGISTER_OP_CUDA_KERNEL(relu2, paddle::operators::Relu2CUDAKernel<CUDA, float>, paddle::operators::Relu2CUDAKernel<CUDA, double>); REGISTER_OP_CUDA_KERNEL(relu2_grad, paddle::operators::Relu2GradCUDAKernel<CUDA, float>, paddle::operators::Relu2GradCUDAKernel<CUDA, double>);
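/*
 * KeRelu2 and KeRelu2Grad above use the grid-stride loop pattern: each
 * thread starts at its global id and advances by the total thread count,
 * so the kernel stays correct even when the grid is smaller than num.
 * Minimal standalone illustration (a scaling kernel of ours, not part of
 * the operator):
 */
__global__ void ScaleInplace(float *x, int n, float alpha) {
  int stride = blockDim.x * gridDim.x;  // total threads in the grid
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
    x[i] *= alpha;  // this thread handles i, i + stride, i + 2*stride, ...
  }
}
// Even a capped launch covers all n elements:
//   ScaleInplace<<<min(grid, 1024), 512>>>(x, n, 2.0f);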
f3cd2cc1db758cc0fc14d3b443354d6a4f42bcf5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/depthwise_conv.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { // A Cuda kernel to compute the depthwise convolution forward pass // in NCHW format. template <typename T> __global__ void KernelDepthwiseConv( const int nthreads, const T* const input_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const output_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / output_channels / output_height / output_width; const int c_out = (index / output_height / output_width) % output_channels; const int h_out = (index / output_width) % output_height; const int w_out = index % output_width; const int c_in = c_out / filter_multiplier; const T* weight = filter_data + c_out * filter_height * filter_width; T value = 0; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = h_in_start + filter_height; const int w_in_end = w_in_start + filter_width; const int in_offset = ((batch * input_channels + c_in) * input_height) * input_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; value += weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)] * input_data[offset]; } } output_data[index] = value; } } // CUDA kernel to compute the depthwise convolution backprop w.r.t input. 
template <typename T> __global__ void KernelDepthwiseConvInputGrad( const int nthreads, const T* const output_grad_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const input_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / input_channels / input_height / input_width; const int c_in = (index / input_height / input_width) % input_channels; const int h_in = (index / input_width) % input_height; const int w_in = index % input_width; const int c_out_start = c_in * filter_multiplier; int h_out_start = (h_in - filter_height + padding_height + stride_height) / stride_height; h_out_start = 0 > h_out_start ? 0 : h_out_start; int h_out_end = (h_in + padding_height) / stride_height; h_out_end = output_height - 1 < h_out_end ? output_height - 1 : h_out_end; int w_out_start = (w_in - filter_width + padding_width + stride_width) / stride_width; w_out_start = 0 > w_out_start ? 0 : w_out_start; int w_out_end = (w_in + padding_width) / stride_width; w_out_end = output_width - 1 < w_out_end ? output_width - 1 : w_out_end; T value = 0; for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier; c_out++) { for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) { const int filter_h = h_in + padding_height - h_out * stride_height; for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) { const int filter_w = w_in + padding_width - w_out * stride_width; const int filter_offset = c_out * filter_height * filter_width + filter_h * filter_width + filter_w; const int output_grad_offset = ((batch * output_channels + c_out) * output_height + h_out) * output_width + w_out; value += output_grad_data[output_grad_offset] * filter_data[filter_offset]; } } } input_grad_data[index] += value; } } // Cuda kernel to compute the depthwise convolution backprop w.r.t. filter. 
template <typename T> __global__ void KernelDepthwiseConvFilterGrad( const int nthreads, const T* const output_grad_data, const T* const input_data, const int num, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const filter_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int w_out = index % output_width; const int h_out = (index / output_width) % output_height; const int c_out = (index / output_width / output_height) % output_channels; const int batch = (index / output_width / output_height / output_channels); const int c_in = c_out / filter_multiplier; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = -padding_height + h_out * stride_height + filter_height; const int w_in_end = -padding_width + w_out * stride_width + filter_width; const int in_offset = (batch * input_channels + c_in) * input_height * input_width; T* addr_offset = filter_grad_data + c_out * filter_height * filter_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; const T diff_temp = output_grad_data[index] * input_data[offset]; T* addr = addr_offset + (h_in - h_in_start) * filter_width + (w_in - w_in_start); paddle::platform::CudaAtomicAdd(addr, diff_temp); } } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <class T> class DepthwiseConvFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* filter_data = filter.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConv<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, input_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data); } }; template <typename T> class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* filter_data = filter.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConvInputGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template <typename T> class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* filter_grad) { const int batch_size = input.dims()[0]; const int 
input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter_grad->dims()[2]; const int ksize_width = filter_grad->dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); hipLaunchKernelGGL(( KernelDepthwiseConvFilterGrad<T>), dim3(grid), dim3(threads), 0, context.stream(), nthreads, output_grad_data, input_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, filter_grad_data); } }; template class DepthwiseConvFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
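/*
 * All three depthwise kernels above recover (batch, channel, h, w) from a
 * flat NCHW index with successive div/mod.  Worked example: for
 * index = 1000 with C = 3, H = 8, W = 8,
 *   w = 1000 % 8 = 0, h = (1000 / 8) % 8 = 5,
 *   c = (1000 / 64) % 3 = 0, n = 1000 / 192 = 5,
 * and recomposing gives ((5*3 + 0)*8 + 5)*8 + 0 = 1000.  Helper sketch
 * (ours, not part of the Paddle source):
 */
__device__ __forceinline__ void DecomposeNCHW(int index, int C, int H, int W,
                                              int *n, int *c, int *h, int *w) {
  *w = index % W;
  *h = (index / W) % H;
  *c = (index / (W * H)) % C;
  *n = index / (W * H * C);
}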
f3cd2cc1db758cc0fc14d3b443354d6a4f42bcf5.cu
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/operators/math/depthwise_conv.h" #include "paddle/platform/cuda_helper.h" namespace paddle { namespace operators { namespace math { // A Cuda kernel to compute the depthwise convolution forward pass // in NCHW format. template <typename T> __global__ void KernelDepthwiseConv( const int nthreads, const T* const input_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const output_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / output_channels / output_height / output_width; const int c_out = (index / output_height / output_width) % output_channels; const int h_out = (index / output_width) % output_height; const int w_out = index % output_width; const int c_in = c_out / filter_multiplier; const T* weight = filter_data + c_out * filter_height * filter_width; T value = 0; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = h_in_start + filter_height; const int w_in_end = w_in_start + filter_width; const int in_offset = ((batch * input_channels + c_in) * input_height) * input_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; value += weight[(h_in - h_in_start) * filter_width + (w_in - w_in_start)] * input_data[offset]; } } output_data[index] = value; } } // CUDA kernel to compute the depthwise convolution backprop w.r.t input. 
template <typename T> __global__ void KernelDepthwiseConvInputGrad( const int nthreads, const T* const output_grad_data, const T* const filter_data, const int batch_size, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const input_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int batch = index / input_channels / input_height / input_width; const int c_in = (index / input_height / input_width) % input_channels; const int h_in = (index / input_width) % input_height; const int w_in = index % input_width; const int c_out_start = c_in * filter_multiplier; int h_out_start = (h_in - filter_height + padding_height + stride_height) / stride_height; h_out_start = 0 > h_out_start ? 0 : h_out_start; int h_out_end = (h_in + padding_height) / stride_height; h_out_end = output_height - 1 < h_out_end ? output_height - 1 : h_out_end; int w_out_start = (w_in - filter_width + padding_width + stride_width) / stride_width; w_out_start = 0 > w_out_start ? 0 : w_out_start; int w_out_end = (w_in + padding_width) / stride_width; w_out_end = output_width - 1 < w_out_end ? output_width - 1 : w_out_end; T value = 0; for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier; c_out++) { for (int h_out = h_out_start; h_out <= h_out_end; ++h_out) { const int filter_h = h_in + padding_height - h_out * stride_height; for (int w_out = w_out_start; w_out <= w_out_end; ++w_out) { const int filter_w = w_in + padding_width - w_out * stride_width; const int filter_offset = c_out * filter_height * filter_width + filter_h * filter_width + filter_w; const int output_grad_offset = ((batch * output_channels + c_out) * output_height + h_out) * output_width + w_out; value += output_grad_data[output_grad_offset] * filter_data[filter_offset]; } } } input_grad_data[index] += value; } } // Cuda kernel to compute the depthwise convolution backprop w.r.t. filter. 
template <typename T> __global__ void KernelDepthwiseConvFilterGrad( const int nthreads, const T* const output_grad_data, const T* const input_data, const int num, const int output_channels, const int output_height, const int output_width, const int input_channels, const int input_height, const int input_width, const int filter_multiplier, const int filter_height, const int filter_width, const int stride_height, const int stride_width, const int padding_height, const int padding_width, T* const filter_grad_data) { int index = (blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x; if (index < nthreads) { const int w_out = index % output_width; const int h_out = (index / output_width) % output_height; const int c_out = (index / output_width / output_height) % output_channels; const int batch = (index / output_width / output_height / output_channels); const int c_in = c_out / filter_multiplier; const int h_in_start = -padding_height + h_out * stride_height; const int w_in_start = -padding_width + w_out * stride_width; const int h_in_end = -padding_height + h_out * stride_height + filter_height; const int w_in_end = -padding_width + w_out * stride_width + filter_width; const int in_offset = (batch * input_channels + c_in) * input_height * input_width; T* addr_offset = filter_grad_data + c_out * filter_height * filter_width; const int h_end = h_in_end < input_height ? h_in_end : input_height; const int w_end = w_in_end < input_width ? w_in_end : input_width; const int h_start = h_in_start > 0 ? h_in_start : 0; const int w_start = w_in_start > 0 ? w_in_start : 0; for (int h_in = h_start; h_in < h_end; h_in++) { for (int w_in = w_start; w_in < w_end; w_in++) { const int offset = in_offset + h_in * input_width + w_in; const T diff_temp = output_grad_data[index] * input_data[offset]; T* addr = addr_offset + (h_in - h_in_start) * filter_width + (w_in - w_in_start); paddle::platform::CudaAtomicAdd(addr, diff_temp); } } } } /* * All tensors are in NCHW format. * Ksize, strides, paddings are two elements. These two elements represent * height and width, respectively. 
*/ template <class T> class DepthwiseConvFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* output) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output->dims()[1]; const int output_height = output->dims()[2]; const int output_width = output->dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* filter_data = filter.data<T>(); T* output_data = output->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConv<T><<<grid, threads, 0, context.stream()>>>( nthreads, input_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, output_data); } }; template <typename T> class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& filter, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* input_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter.dims()[2]; const int ksize_width = filter.dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* filter_data = filter.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * input_channels * input_height * input_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConvInputGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, filter_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, input_grad_data); } }; template <typename T> class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T> { public: void operator()(const platform::CUDADeviceContext& context, const framework::Tensor& input, const framework::Tensor& output_grad, const std::vector<int>& strides, const std::vector<int>& paddings, framework::Tensor* filter_grad) { const int batch_size = input.dims()[0]; const int input_channels = input.dims()[1]; const int input_height = 
input.dims()[2]; const int input_width = input.dims()[3]; const int output_channels = output_grad.dims()[1]; const int output_height = output_grad.dims()[2]; const int output_width = output_grad.dims()[3]; const int ksize_height = filter_grad->dims()[2]; const int ksize_width = filter_grad->dims()[3]; const int stride_height = strides[0]; const int stride_width = strides[1]; const int padding_height = paddings[0]; const int padding_width = paddings[1]; const T* input_data = input.data<T>(); const T* output_grad_data = output_grad.data<T>(); T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace()); int nthreads = batch_size * output_channels * output_height * output_width; int blocks = (nthreads + 1024 - 1) / 1024; dim3 threads(1024, 1); dim3 grid(blocks, 1); KernelDepthwiseConvFilterGrad<T><<<grid, threads, 0, context.stream()>>>( nthreads, output_grad_data, input_data, batch_size, output_channels, output_height, output_width, input_channels, input_height, input_width, output_channels / input_channels, ksize_height, ksize_width, stride_height, stride_width, padding_height, padding_width, filter_grad_data); } }; template class DepthwiseConvFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, double>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, float>; template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, double>; } // namespace math } // namespace operators } // namespace paddle
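KernelDepthwiseConv, KernelDepthwiseConvInputGrad, and KernelDepthwiseConvFilterGrad all recover an NCHW coordinate from the linear thread index with the same divide/modulo chain. A standalone sketch of that decode (the struct and function names are illustrative, not from the file):

struct NCHWCoord { int batch, channel, h, w; };

// inverse of index = ((batch * channels + channel) * height + h) * width + w
inline NCHWCoord decode_nchw(int index, int channels, int height, int width) {
  NCHWCoord p;
  p.w       = index % width;
  p.h       = (index / width) % height;
  p.channel = (index / width / height) % channels;
  p.batch   = index / width / height / channels;
  return p;
}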
4b701cd05bb7ef2eee08236faff7f07f22e5a5e6.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "array_copy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE*sizeof(float)); /* allocate bytes, not element count */ float *c = NULL; hipMalloc(&c, XSIZE*YSIZE*sizeof(float)); size_t mx = 1; size_t my = 1; size_t mz = 1; size_t sx = 1; size_t sy = 1; size_t sz = 1; size_t ox = 1; size_t oy = 1; size_t oz = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( array_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( array_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( array_copy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
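This file pair illustrates the mechanical rewrite hipify applies to kernel launches. A sketch of the general mapping, with kernel, grid, block, shmemBytes, stream, and args as placeholders:

// CUDA source form (see the .cu file below):
//   kernel<<<grid, block, shmemBytes, stream>>>(args...);
// hipify output form (as in the .hip file above):
//   hipLaunchKernelGGL((kernel), dim3(grid), dim3(block), shmemBytes, stream, args...);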
4b701cd05bb7ef2eee08236faff7f07f22e5a5e6.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "array_copy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE*sizeof(float)); /* allocate bytes, not element count */ float *c = NULL; cudaMalloc(&c, XSIZE*YSIZE*sizeof(float)); size_t mx = 1; size_t my = 1; size_t mz = 1; size_t sx = 1; size_t sy = 1; size_t sz = 1; size_t ox = 1; size_t oy = 1; size_t oz = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); array_copy<<<gridBlock,threadBlock>>>(a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { array_copy<<<gridBlock,threadBlock>>>(a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { array_copy<<<gridBlock,threadBlock>>>(a,c,mx,my,mz,sx,sy,sz,ox,oy,oz); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
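The while-loops above round XSIZE and YSIZE up to block multiples before dividing; a single ceil-division yields the same grid. A sketch reusing the file's variable names:

dim3 gridBlock((XSIZE + BLOCKX - 1) / BLOCKX,   // equivalent to incrementing iXSIZE, then dividing
               (YSIZE + BLOCKY - 1) / BLOCKY);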
58c43d9e559f4777554f1842d2f9757150fc4b66.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "kmeans.h" #include "hip/hip_runtime.h" #define RANDOM_MAX 2147483647 #ifndef FLT_MAX #define FLT_MAX 3.40282347e+38 #endif #define NUM_OF_GPU_THREADS 1024 extern double wtime(void); int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist=FLT_MAX; /* find the cluster center id with min distance to pt */ for (i=0; i<npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return(index); } __device__ __host__ int cuda_find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float *pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i = 0; i < npts; i++) { float dist; dist = cuda_euclid_dist_2(pt, pts + i * nfeatures, nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return (index); } /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]); return(ans); } __device__ __host__ __inline float cuda_euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans = 0.0; for (i = 0; i < numdims; i++) ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]); return (ans); } /*----< kmeans_clustering() >---------------------------------------------*/ float** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n=0, index, loop=0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ /* allocate space for returning variable clusters[] */ clusters = (float**) malloc(nclusters * sizeof(float*)); clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float)); for (i=1; i<nclusters; i++) clusters[i] = clusters[i-1] + nfeatures; /* randomly pick cluster centers */ for (i=0; i<nclusters; i++) { //n = (int)rand() % npoints; for (j=0; j<nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } for (i=0; i<npoints; i++) membership[i] = -1; /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int*) calloc(nclusters, sizeof(int)); new_centers = (float**) malloc(nclusters * sizeof(float*)); new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float)); for (i=1; i<nclusters; i++) new_centers[i] = new_centers[i-1] + nfeatures; do { delta = 0.0; for (i=0; i<npoints; i++) { /* find the index of nestest cluster centers */ index = find_nearest_point(feature[i], nfeatures, clusters, nclusters); /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of objects located within */ new_centers_len[index]++; for (j=0; j<nfeatures; j++) new_centers[index][j] += feature[i][j]; } /* for(int i = 0; i < nclusters; i++) { printf("new_centers_len[%d]=%d\n", i, new_centers_len[i]); } */ /* printf("new_centers\n"); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { printf("%f ", new_centers[i][j]); } printf("\n"); } printf("\n\n"); */ /* replace old cluster centers with new_centers */ for (i=0; i<nclusters; i++) { for (j=0; j<nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } //delta /= npoints; //printf("delta %f\n", delta); } while (delta > threshold); free(new_centers[0]); free(new_centers); free(new_centers_len); return clusters; } void checkError(hipError_t err, int line) { if (hipSuccess != err) { printf("Error "); printf("%s", hipGetErrorName(err)); printf(" happenend: "); printf("%s", hipGetErrorString(err)); printf(" at line %d", line); exit(-1); } } __global__ void IntReduction(int* cuda_delta_array, int n, int* cuda_delta_result_array) { __shared__ int shared_delta_array[NUM_OF_GPU_THREADS]; int myId = blockIdx.x * blockDim.x + threadIdx.x; int localId = threadIdx.x; __syncthreads(); if (myId < n) { shared_delta_array[localId] = cuda_delta_array[myId]; } __syncthreads(); //if (myId < n) { if (blockIdx.x < gridDim.x - 1) { // printf("entered if"); // 1024 elements to reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (localId < s && myId < n) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } else { // find number of elements remaining in array -- then reduce those int remainingElements = n - blockIdx.x * NUM_OF_GPU_THREADS; // printf("entered else, elems remaining: %d\n", remainingElements); int b = 1; while (b < remainingElements) // nearest larger power of 2 { b = b << 1; } //printf("remaining=%d, b = %d, globalId=%d, localId=%d\n", remainingElements, b, localId, myId); for (unsigned int s = b / 2; s > 0; s >>= 1) { if ((localId < s) && (localId + s < remainingElements) && (myId < n)) { shared_delta_array[localId] = 
shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } // only element with local 0 id places result into resulting arrays if (localId == 0) { //printf("shared_d_array[%d] = %f\n", localId, shared_d_array[localId]); cuda_delta_result_array[blockIdx.x] = shared_delta_array[0]; } __syncthreads(); } __global__ void FloatReduction(float* cuda_delta_array, int n, float* cuda_delta_result_array) { __shared__ float shared_delta_array[NUM_OF_GPU_THREADS]; int myId = blockIdx.x * blockDim.x + threadIdx.x; int localId = threadIdx.x; __syncthreads(); if (myId < n) { shared_delta_array[localId] = cuda_delta_array[myId]; } __syncthreads(); //if (myId < n) { if (blockIdx.x < gridDim.x - 1) { // printf("entered if"); // 1024 elements to reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (localId < s && myId < n) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } else { // find number of elements remaining in array -- then reduce those int remainingElements = n - blockIdx.x * NUM_OF_GPU_THREADS; // printf("entered else, elems remaining: %d\n", remainingElements); int b = 1; while (b < remainingElements) // nearest larger power of 2 { b = b << 1; } //printf("remaining=%d, b = %d, globalId=%d, localId=%d\n", remainingElements, b, localId, myId); for (unsigned int s = b / 2; s > 0; s >>= 1) { if ((localId < s) && (localId + s < remainingElements) && (myId < n)) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } // only element with local 0 id places result into resulting arrays if (localId == 0) { //printf("shared_d_array[%d] = %f\n", localId, shared_d_array[localId]); cuda_delta_result_array[blockIdx.x] = shared_delta_array[0]; } __syncthreads(); } __global__ void updateNewCenters(int npoints, float* cuda_delta_array, int* cuda_membership, float* cuda_features, int nfeatures, float* cuda_clusters, int nclusters, int* cuda_new_centers_len, float* cuda_new_centers) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; int local_id = threadIdx.x; float delta = 0; if(global_id < npoints) { int membership = cuda_membership[global_id]; // global mem access int index = cuda_find_nearest_point(cuda_features + global_id * nfeatures, nfeatures, cuda_clusters, nclusters); //printf("id=%d membership=%d index=%d\n", global_id, membership, index); if (membership != index) { delta = 1; // local delta every thread // send to global delta array } cuda_membership[global_id] = membership; cuda_delta_array[global_id] = delta; if(delta != 0) { // printf("global_id=%d, local_id=%d, delta=%d\n", global_id, local_id, delta); } cuda_delta_array[global_id] = delta; cuda_membership[global_id] = index; for(int i = 0; i < nclusters; i++) { cuda_new_centers_len[i * npoints + global_id] = 0; for(int j = 0; j < nfeatures; j++) { cuda_new_centers[(i * nfeatures + j) * npoints + global_id] = 0; } } cuda_new_centers_len[index * npoints + global_id] = 1; // race condition for (int j = 0; j < nfeatures; j++) cuda_new_centers[(index * nfeatures + j) * npoints + global_id] += cuda_features[global_id * nfeatures + j]; } } /*----< kmeans_clustering() >---------------------------------------------*/ float **kmeans_clustering_par(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n = 0, index, loop = 0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ dim3 gridDim((npoints + NUM_OF_GPU_THREADS - 1) / NUM_OF_GPU_THREADS); dim3 blockDim(NUM_OF_GPU_THREADS); /* allocate space for returning variable clusters[] */ /* allocate space for returning variable clusters[] */ clusters = (float **)malloc(nclusters * sizeof(float *)); clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float)); for (i = 1; i < nclusters; i++) clusters[i] = clusters[i - 1] + nfeatures; /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { //n = (int)rand() % npoints; for (j = 0; j < nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } // INICIJALIZUJE CPU I KOPIRA U GLOBALNU MEMORIJU float * cuda_clusters; checkError(hipMalloc(&cuda_clusters, nclusters * nfeatures * sizeof(float)), __LINE__); for (i = 0; i < npoints; i++) membership[i] = -1; int* cuda_membership; checkError(hipMalloc(&cuda_membership, npoints * sizeof(int)), __LINE__); checkError(hipMemcpy(cuda_membership, membership, npoints * sizeof(int), hipMemcpyHostToDevice), __LINE__); // init cuda features float* cuda_features; checkError(hipMalloc(&cuda_features, npoints * nfeatures * sizeof(float)), __LINE__); checkError(hipMemcpy(cuda_features, feature[0], npoints * nfeatures * sizeof(float), hipMemcpyHostToDevice), __LINE__); // INICIJALIZUJE CPU /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int *)calloc(nclusters, sizeof(int)); int* cuda_new_centers_len; checkError(hipMalloc(&cuda_new_centers_len, nclusters * npoints * sizeof(int)), __LINE__); int* cuda_new_centers_len_result; checkError(hipMalloc(&cuda_new_centers_len_result, nclusters * gridDim.x * sizeof(int)), __LINE__); int* new_centers_len_result; new_centers_len_result = (int*)calloc(nclusters * gridDim.x, sizeof(int)); new_centers = (float **)malloc(nclusters * sizeof(float *)); new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float)); for (i = 1; i < nclusters; i++) new_centers[i] = new_centers[i - 1] + nfeatures; float * cuda_new_centers; checkError(hipMalloc(&cuda_new_centers, nclusters * nfeatures * npoints * sizeof(float)), __LINE__); float * cuda_new_centers_result; checkError(hipMalloc(&cuda_new_centers_result, nclusters * nfeatures * gridDim.x * sizeof(float)), __LINE__); float * new_centers_result = (float*)malloc(nclusters * nfeatures * gridDim.x * sizeof(float)); /*float * cuda_new_centers; checkError(hipMalloc(&cuda_new_centers, nclusters * nfeatures * sizeof(float)), __LINE__); checkError(hipMemcpy(cuda_new_centers, new_centers[0], nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice), __LINE__); */ // create cuda_delta array float* delta_result_array; delta_result_array = (float*)malloc(gridDim.x * sizeof(float)); float* cuda_delta_array; checkError(hipMalloc(&cuda_delta_array, npoints * sizeof(float)), __LINE__); float* cuda_delta_result_array; checkError(hipMalloc(&cuda_delta_result_array, gridDim.x * sizeof(float)), __LINE__); // create new_centers_len //printf("KERNEL pre\n"); //printf("gridDim.x=%d, blockDim.x=%d\n", gridDim.x, blockDim.x); do { checkError(hipMemcpy(cuda_clusters, clusters[0], nclusters * nfeatures * sizeof(float), hipMemcpyHostToDevice), __LINE__);; hipLaunchKernelGGL(( updateNewCenters) , dim3(gridDim), dim3(blockDim) , 0, 0, npoints, cuda_delta_array, cuda_membership, cuda_features, nfeatures, cuda_clusters, nclusters, cuda_new_centers_len, cuda_new_centers); 
checkError(hipDeviceSynchronize(), __LINE__); hipLaunchKernelGGL(( FloatReduction) , dim3(gridDim), dim3(blockDim) , 0, 0, cuda_delta_array, npoints, cuda_delta_result_array); checkError(hipDeviceSynchronize(), __LINE__); checkError(hipMemcpy(delta_result_array, cuda_delta_result_array, gridDim.x*sizeof(float), hipMemcpyDeviceToHost), __LINE__); delta = 0.0f; for(int i = 0; i < gridDim.x; i++) { // printf("delta[%d]=%f\n", i, delta_result_array[i]); delta += delta_result_array[i]; } // printf("KERNEL posle delta = %f\n", delta); for(int i = 0; i < nclusters; i++) { hipLaunchKernelGGL(( IntReduction) , dim3(gridDim), dim3(blockDim), 0, 0, cuda_new_centers_len + i * npoints, npoints, cuda_new_centers_len_result + i * gridDim.x); checkError(hipDeviceSynchronize(), __LINE__); } checkError(hipDeviceSynchronize(), __LINE__); checkError(hipMemcpy(new_centers_len_result, cuda_new_centers_len_result, gridDim.x * nclusters * sizeof(int), hipMemcpyDeviceToHost ), __LINE__); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < gridDim.x; j++) { new_centers_len[i] += new_centers_len_result[i*gridDim.x + j]; } } /* for(int i = 0; i < nclusters; i++) { printf("new_centers_len[%d]=%d\n", i, new_centers_len[i]); } */ // cuda_new_centers[(index * nfeatures + j) * npoints + global_id] += cuda_feature[global_id * nfeatures + j]; for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { hipLaunchKernelGGL(( FloatReduction) , dim3(gridDim), dim3(blockDim), 0, 0, cuda_new_centers + (i * nfeatures + j) * npoints , npoints, cuda_new_centers_result + (i * nfeatures + j) * gridDim.x); } } checkError(hipDeviceSynchronize(), __LINE__); checkError(hipMemcpy(new_centers_result, cuda_new_centers_result, nclusters * nfeatures * gridDim.x * sizeof(float), hipMemcpyDeviceToHost), __LINE__); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { for(int k = 0; k < gridDim.x; k++) { new_centers[i][j] += new_centers_result[(i * nfeatures + j) * gridDim.x + k]; } } } /* replace old cluster centers with new_centers */ for (i = 0; i < nclusters; i++) { for (j = 0; j < nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } } while(delta > threshold); checkError(hipFree(cuda_delta_array), __LINE__); checkError(hipFree(cuda_delta_result_array), __LINE__); checkError(hipFree(cuda_new_centers_len), __LINE__); checkError(hipFree(cuda_new_centers_len_result), __LINE__); checkError(hipFree(cuda_new_centers), __LINE__); checkError(hipFree(cuda_new_centers_result), __LINE__); free(new_centers[0]); free(new_centers); free(new_centers_len); free(delta_result_array); free(new_centers_len_result); free(new_centers_result); return clusters; }
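updateNewCenters above gives every point a private accumulator slot, cuda_new_centers[(c * nfeatures + f) * npoints + point], so the per-cluster feature sums can be formed afterwards by reducing npoints-long rows (despite the "race condition" comment in the kernel, slots never collide). A host-side mock of that layout and reduction, with illustrative names:

// scratch holds nclusters * nfeatures rows of npoints entries each
inline float reduce_row(const float* scratch, int c, int f, int nfeatures, int npoints) {
  const float* row = scratch + (c * nfeatures + f) * npoints;
  float sum = 0.0f;
  for (int p = 0; p < npoints; ++p) sum += row[p];  // FloatReduction does this blockwise on the GPU
  return sum;
}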
58c43d9e559f4777554f1842d2f9757150fc4b66.cu
#include <stdio.h> #include <stdlib.h> #include <float.h> #include <math.h> #include "kmeans.h" #include "cuda_runtime.h" #define RANDOM_MAX 2147483647 #ifndef FLT_MAX #define FLT_MAX 3.40282347e+38 #endif #define NUM_OF_GPU_THREADS 1024 extern double wtime(void); int find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float **pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist=FLT_MAX; /* find the cluster center id with min distance to pt */ for (i=0; i<npts; i++) { float dist; dist = euclid_dist_2(pt, pts[i], nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return(index); } __device__ __host__ int cuda_find_nearest_point(float *pt, /* [nfeatures] */ int nfeatures, float *pts, /* [npts][nfeatures] */ int npts) { int index, i; float min_dist = FLT_MAX; /* find the cluster center id with min distance to pt */ for (i = 0; i < npts; i++) { float dist; dist = cuda_euclid_dist_2(pt, pts + i * nfeatures, nfeatures); /* no need square root */ if (dist < min_dist) { min_dist = dist; index = i; } } return (index); } /*----< euclid_dist_2() >----------------------------------------------------*/ /* multi-dimensional spatial Euclid distance square */ __inline float euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans=0.0; for (i=0; i<numdims; i++) ans += (pt1[i]-pt2[i]) * (pt1[i]-pt2[i]); return(ans); } __device__ __host__ __inline float cuda_euclid_dist_2(float *pt1, float *pt2, int numdims) { int i; float ans = 0.0; for (i = 0; i < numdims; i++) ans += (pt1[i] - pt2[i]) * (pt1[i] - pt2[i]); return (ans); } /*----< kmeans_clustering() >---------------------------------------------*/ float** kmeans_clustering(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n=0, index, loop=0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ /* allocate space for returning variable clusters[] */ clusters = (float**) malloc(nclusters * sizeof(float*)); clusters[0] = (float*) malloc(nclusters * nfeatures * sizeof(float)); for (i=1; i<nclusters; i++) clusters[i] = clusters[i-1] + nfeatures; /* randomly pick cluster centers */ for (i=0; i<nclusters; i++) { //n = (int)rand() % npoints; for (j=0; j<nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } for (i=0; i<npoints; i++) membership[i] = -1; /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int*) calloc(nclusters, sizeof(int)); new_centers = (float**) malloc(nclusters * sizeof(float*)); new_centers[0] = (float*) calloc(nclusters * nfeatures, sizeof(float)); for (i=1; i<nclusters; i++) new_centers[i] = new_centers[i-1] + nfeatures; do { delta = 0.0; for (i=0; i<npoints; i++) { /* find the index of nestest cluster centers */ index = find_nearest_point(feature[i], nfeatures, clusters, nclusters); /* if membership changes, increase delta by 1 */ if (membership[i] != index) delta += 1.0; /* assign the membership to object i */ membership[i] = index; /* update new cluster centers : sum of objects located within */ new_centers_len[index]++; for (j=0; j<nfeatures; j++) new_centers[index][j] += feature[i][j]; } /* for(int i = 0; i < nclusters; i++) { printf("new_centers_len[%d]=%d\n", i, new_centers_len[i]); } */ /* printf("new_centers\n"); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { printf("%f ", new_centers[i][j]); } printf("\n"); } printf("\n\n"); */ /* replace old cluster centers with new_centers */ for (i=0; i<nclusters; i++) { for (j=0; j<nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } //delta /= npoints; //printf("delta %f\n", delta); } while (delta > threshold); free(new_centers[0]); free(new_centers); free(new_centers_len); return clusters; } void checkError(cudaError_t err, int line) { if (cudaSuccess != err) { printf("Error "); printf("%s", cudaGetErrorName(err)); printf(" happenend: "); printf("%s", cudaGetErrorString(err)); printf(" at line %d", line); exit(-1); } } __global__ void IntReduction(int* cuda_delta_array, int n, int* cuda_delta_result_array) { __shared__ int shared_delta_array[NUM_OF_GPU_THREADS]; int myId = blockIdx.x * blockDim.x + threadIdx.x; int localId = threadIdx.x; __syncthreads(); if (myId < n) { shared_delta_array[localId] = cuda_delta_array[myId]; } __syncthreads(); //if (myId < n) { if (blockIdx.x < gridDim.x - 1) { // printf("entered if"); // 1024 elements to reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (localId < s && myId < n) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } else { // find number of elements remaining in array -- then reduce those int remainingElements = n - blockIdx.x * NUM_OF_GPU_THREADS; // printf("entered else, elems remaining: %d\n", remainingElements); int b = 1; while (b < remainingElements) // nearest larger power of 2 { b = b << 1; } //printf("remaining=%d, b = %d, globalId=%d, localId=%d\n", remainingElements, b, localId, myId); for (unsigned int s = b / 2; s > 0; s >>= 1) { if ((localId < s) && (localId + s < remainingElements) && (myId < n)) { shared_delta_array[localId] = 
shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } // only element with local 0 id places result into resulting arrays if (localId == 0) { //printf("shared_d_array[%d] = %f\n", localId, shared_d_array[localId]); cuda_delta_result_array[blockIdx.x] = shared_delta_array[0]; } __syncthreads(); } __global__ void FloatReduction(float* cuda_delta_array, int n, float* cuda_delta_result_array) { __shared__ float shared_delta_array[NUM_OF_GPU_THREADS]; int myId = blockIdx.x * blockDim.x + threadIdx.x; int localId = threadIdx.x; __syncthreads(); if (myId < n) { shared_delta_array[localId] = cuda_delta_array[myId]; } __syncthreads(); //if (myId < n) { if (blockIdx.x < gridDim.x - 1) { // printf("entered if"); // 1024 elements to reduce for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) { if (localId < s && myId < n) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } else { // find number of elements remaining in array -- then reduce those int remainingElements = n - blockIdx.x * NUM_OF_GPU_THREADS; // printf("entered else, elems remaining: %d\n", remainingElements); int b = 1; while (b < remainingElements) // nearest larger power of 2 { b = b << 1; } //printf("remaining=%d, b = %d, globalId=%d, localId=%d\n", remainingElements, b, localId, myId); for (unsigned int s = b / 2; s > 0; s >>= 1) { if ((localId < s) && (localId + s < remainingElements) && (myId < n)) { shared_delta_array[localId] = shared_delta_array[localId] + shared_delta_array[localId + s]; } __syncthreads(); } } // only element with local 0 id places result into resulting arrays if (localId == 0) { //printf("shared_d_array[%d] = %f\n", localId, shared_d_array[localId]); cuda_delta_result_array[blockIdx.x] = shared_delta_array[0]; } __syncthreads(); } __global__ void updateNewCenters(int npoints, float* cuda_delta_array, int* cuda_membership, float* cuda_features, int nfeatures, float* cuda_clusters, int nclusters, int* cuda_new_centers_len, float* cuda_new_centers) { int global_id = blockIdx.x * blockDim.x + threadIdx.x; int local_id = threadIdx.x; float delta = 0; if(global_id < npoints) { int membership = cuda_membership[global_id]; // global mem access int index = cuda_find_nearest_point(cuda_features + global_id * nfeatures, nfeatures, cuda_clusters, nclusters); //printf("id=%d membership=%d index=%d\n", global_id, membership, index); if (membership != index) { delta = 1; // local delta every thread // send to global delta array } cuda_membership[global_id] = membership; cuda_delta_array[global_id] = delta; if(delta != 0) { // printf("global_id=%d, local_id=%d, delta=%d\n", global_id, local_id, delta); } cuda_delta_array[global_id] = delta; cuda_membership[global_id] = index; for(int i = 0; i < nclusters; i++) { cuda_new_centers_len[i * npoints + global_id] = 0; for(int j = 0; j < nfeatures; j++) { cuda_new_centers[(i * nfeatures + j) * npoints + global_id] = 0; } } cuda_new_centers_len[index * npoints + global_id] = 1; // race condition for (int j = 0; j < nfeatures; j++) cuda_new_centers[(index * nfeatures + j) * npoints + global_id] += cuda_features[global_id * nfeatures + j]; } } /*----< kmeans_clustering() >---------------------------------------------*/ float **kmeans_clustering_par(float **feature, /* in: [npoints][nfeatures] */ int nfeatures, int npoints, int nclusters, float threshold, int *membership) /* out: [npoints] */ { int i, j, n = 0, index, loop = 0; int *new_centers_len; /* [nclusters]: no. 
of points in each cluster */ float delta; float **clusters; /* out: [nclusters][nfeatures] */ float **new_centers; /* [nclusters][nfeatures] */ dim3 gridDim((npoints + NUM_OF_GPU_THREADS - 1) / NUM_OF_GPU_THREADS); dim3 blockDim(NUM_OF_GPU_THREADS); /* allocate space for returning variable clusters[] */ /* allocate space for returning variable clusters[] */ clusters = (float **)malloc(nclusters * sizeof(float *)); clusters[0] = (float *)malloc(nclusters * nfeatures * sizeof(float)); for (i = 1; i < nclusters; i++) clusters[i] = clusters[i - 1] + nfeatures; /* randomly pick cluster centers */ for (i = 0; i < nclusters; i++) { //n = (int)rand() % npoints; for (j = 0; j < nfeatures; j++) clusters[i][j] = feature[n][j]; n++; } // INICIJALIZUJE CPU I KOPIRA U GLOBALNU MEMORIJU float * cuda_clusters; checkError(cudaMalloc(&cuda_clusters, nclusters * nfeatures * sizeof(float)), __LINE__); for (i = 0; i < npoints; i++) membership[i] = -1; int* cuda_membership; checkError(cudaMalloc(&cuda_membership, npoints * sizeof(int)), __LINE__); checkError(cudaMemcpy(cuda_membership, membership, npoints * sizeof(int), cudaMemcpyHostToDevice), __LINE__); // init cuda features float* cuda_features; checkError(cudaMalloc(&cuda_features, npoints * nfeatures * sizeof(float)), __LINE__); checkError(cudaMemcpy(cuda_features, feature[0], npoints * nfeatures * sizeof(float), cudaMemcpyHostToDevice), __LINE__); // INICIJALIZUJE CPU /* need to initialize new_centers_len and new_centers[0] to all 0 */ new_centers_len = (int *)calloc(nclusters, sizeof(int)); int* cuda_new_centers_len; checkError(cudaMalloc(&cuda_new_centers_len, nclusters * npoints * sizeof(int)), __LINE__); int* cuda_new_centers_len_result; checkError(cudaMalloc(&cuda_new_centers_len_result, nclusters * gridDim.x * sizeof(int)), __LINE__); int* new_centers_len_result; new_centers_len_result = (int*)calloc(nclusters * gridDim.x, sizeof(int)); new_centers = (float **)malloc(nclusters * sizeof(float *)); new_centers[0] = (float *)calloc(nclusters * nfeatures, sizeof(float)); for (i = 1; i < nclusters; i++) new_centers[i] = new_centers[i - 1] + nfeatures; float * cuda_new_centers; checkError(cudaMalloc(&cuda_new_centers, nclusters * nfeatures * npoints * sizeof(float)), __LINE__); float * cuda_new_centers_result; checkError(cudaMalloc(&cuda_new_centers_result, nclusters * nfeatures * gridDim.x * sizeof(float)), __LINE__); float * new_centers_result = (float*)malloc(nclusters * nfeatures * gridDim.x * sizeof(float)); /*float * cuda_new_centers; checkError(cudaMalloc(&cuda_new_centers, nclusters * nfeatures * sizeof(float)), __LINE__); checkError(cudaMemcpy(cuda_new_centers, new_centers[0], nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice), __LINE__); */ // create cuda_delta array float* delta_result_array; delta_result_array = (float*)malloc(gridDim.x * sizeof(float)); float* cuda_delta_array; checkError(cudaMalloc(&cuda_delta_array, npoints * sizeof(float)), __LINE__); float* cuda_delta_result_array; checkError(cudaMalloc(&cuda_delta_result_array, gridDim.x * sizeof(float)), __LINE__); // create new_centers_len //printf("KERNEL pre\n"); //printf("gridDim.x=%d, blockDim.x=%d\n", gridDim.x, blockDim.x); do { checkError(cudaMemcpy(cuda_clusters, clusters[0], nclusters * nfeatures * sizeof(float), cudaMemcpyHostToDevice), __LINE__);; updateNewCenters <<< gridDim, blockDim >>>(npoints, cuda_delta_array, cuda_membership, cuda_features, nfeatures, cuda_clusters, nclusters, cuda_new_centers_len, cuda_new_centers); checkError(cudaDeviceSynchronize(), 
__LINE__); FloatReduction <<< gridDim, blockDim >>>(cuda_delta_array, npoints, cuda_delta_result_array); checkError(cudaDeviceSynchronize(), __LINE__); checkError(cudaMemcpy(delta_result_array, cuda_delta_result_array, gridDim.x*sizeof(float), cudaMemcpyDeviceToHost), __LINE__); delta = 0.0f; for(int i = 0; i < gridDim.x; i++) { // printf("delta[%d]=%f\n", i, delta_result_array[i]); delta += delta_result_array[i]; } // printf("KERNEL posle delta = %f\n", delta); for(int i = 0; i < nclusters; i++) { IntReduction <<<gridDim, blockDim>>> (cuda_new_centers_len + i * npoints, npoints, cuda_new_centers_len_result + i * gridDim.x); checkError(cudaDeviceSynchronize(), __LINE__); } checkError(cudaDeviceSynchronize(), __LINE__); checkError(cudaMemcpy(new_centers_len_result, cuda_new_centers_len_result, gridDim.x * nclusters * sizeof(int), cudaMemcpyDeviceToHost ), __LINE__); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < gridDim.x; j++) { new_centers_len[i] += new_centers_len_result[i*gridDim.x + j]; } } /* for(int i = 0; i < nclusters; i++) { printf("new_centers_len[%d]=%d\n", i, new_centers_len[i]); } */ // cuda_new_centers[(index * nfeatures + j) * npoints + global_id] += cuda_feature[global_id * nfeatures + j]; for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { FloatReduction <<<gridDim, blockDim>>>(cuda_new_centers + (i * nfeatures + j) * npoints , npoints, cuda_new_centers_result + (i * nfeatures + j) * gridDim.x); } } checkError(cudaDeviceSynchronize(), __LINE__); checkError(cudaMemcpy(new_centers_result, cuda_new_centers_result, nclusters * nfeatures * gridDim.x * sizeof(float), cudaMemcpyDeviceToHost), __LINE__); for(int i = 0; i < nclusters; i++) { for(int j = 0; j < nfeatures; j++) { for(int k = 0; k < gridDim.x; k++) { new_centers[i][j] += new_centers_result[(i * nfeatures + j) * gridDim.x + k]; } } } /* replace old cluster centers with new_centers */ for (i = 0; i < nclusters; i++) { for (j = 0; j < nfeatures; j++) { if (new_centers_len[i] > 0) clusters[i][j] = new_centers[i][j] / new_centers_len[i]; new_centers[i][j] = 0.0; /* set back to 0 */ } new_centers_len[i] = 0; /* set back to 0 */ } } while(delta > threshold); checkError(cudaFree(cuda_delta_array), __LINE__); checkError(cudaFree(cuda_delta_result_array), __LINE__); checkError(cudaFree(cuda_new_centers_len), __LINE__); checkError(cudaFree(cuda_new_centers_len_result), __LINE__); checkError(cudaFree(cuda_new_centers), __LINE__); checkError(cudaFree(cuda_new_centers_result), __LINE__); free(new_centers[0]); free(new_centers); free(new_centers_len); free(delta_result_array); free(new_centers_len_result); free(new_centers_result); return clusters; }
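Note that kmeans_clustering_par never copies the converged assignments out of cuda_membership into the caller's membership array, and cuda_membership, cuda_clusters, and cuda_features are never freed. A minimal epilogue sketch, reusing the function's own names and its checkError wrapper, of what would go just before return clusters;:

checkError(cudaMemcpy(membership, cuda_membership, npoints * sizeof(int),
                      cudaMemcpyDeviceToHost), __LINE__);  // publish final assignments
checkError(cudaFree(cuda_membership), __LINE__);
checkError(cudaFree(cuda_clusters), __LINE__);
checkError(cudaFree(cuda_features), __LINE__);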
972eaf3b004b690be21995ca0ce48cf786508b83.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <vector> #include <hip/hip_runtime.h> #include "nvml_monitor.h" #include "gemm_kernel.h" using std::cout; using std::generate; using std::vector; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void matrixMul(const int *a, const int *b, int *c, int N, int M, int K) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //printf("Kernel Called..."); // Iterate over row, and down column //printf("%d, %d\n", row, col); if(row < N && col < K){ c[row * K + col] = 0; for (int k = 0; k < M; k++) { // Accumulate results for a single element c[row * K + col] += a[row * M + k] * b[k * K + col]; } } } // Check result on the CPU void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N, int M, int K) { // For every row... for (int i = 0; i < N; i++) { // For every column... for (int j = 0; j < K; j++) { // For every element in the row-column pair int tmp = 0; for (int k = 0; k < M; k++) { // Accumulate the partial results tmp += a[i * M + k] * b[k * K + j]; } // Check against the CPU result //printf("%d, %d, %d, %d\n", tmp, c[i*K+j], i, j); assert(tmp == c[i * K + j]); } } } int main() { std::string const fname = {"trace_new.csv"}; int dev = 0; NVMLMonThread logger(dev, fname); int N = 1 << 14; int M = 1 << 14; int K = 1 << 14; //Start Monitoring Thraed std::thread threadStart(&NVMLMonThread::log, &logger); size_t size_a = N * M * sizeof(int); size_t size_b = M * K * sizeof(int); size_t size_c = N * K * sizeof(int); // Host vectors vector<int> h_a(N * M); vector<int> h_b(M * K); vector<int> h_c(N * K); // Initialize matrices generate(h_a.begin(), h_a.end(), []() { return rand() % 100; }); generate(h_b.begin(), h_b.end(), []() { return rand() % 100; }); // Allocate device memory logger.caller_state = 1; int *d_a, *d_b, *d_c; gpuErrchk(hipMallocManaged(&d_a, size_a)); gpuErrchk(hipMallocManaged(&d_b, size_b)); gpuErrchk(hipMallocManaged(&d_c, size_c)); // Copy data to the device gpuErrchk(hipMemcpy(d_a, h_a.data(), size_a, hipMemcpyHostToDevice)); gpuErrchk(hipMemcpy(d_b, h_b.data(), size_b, hipMemcpyHostToDevice)); // Threads per block in one dimension. Using equal number of threads in both dimensions of block. 
int THREADS = 32; // Number of blocks in each dimension int BLOCKS_X = (K+THREADS-1) / THREADS; int BLOCKS_Y = (N+THREADS-1) / THREADS; // Use dim3 structs for block and grid dimensions dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS_X, BLOCKS_Y); // Launch kernel printf("Calling Kernel...\n"); logger.caller_state = 2; hipLaunchKernelGGL(( matrixMul), dim3(blocks), dim3(threads), 0, 0, d_a, d_b, d_c, N, M, K); gpuErrchk(hipDeviceSynchronize()); logger.caller_state = 3; // Copy back to the host gpuErrchk(hipMemcpy(h_c.data(), d_c, size_c, hipMemcpyDeviceToHost)); hipDeviceSynchronize(); // Check result //printf("Verifying Result...\n"); //verify_result(h_a, h_b, h_c, N, M, K); cout << "COMPLETED SUCCESSFULLY\n"; // Free memory on device hipFree(d_a); hipFree(d_b); hipFree(d_c); logger.caller_state = 4; std::thread threadKill(&NVMLMonThread::killThread, &logger); //logger.loop = false; threadStart.join(); threadKill.join(); return 0; }
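Since d_a, d_b, and d_c come from hipMallocManaged, the explicit host-to-device copies in this file are optional: the host can write straight through the managed pointers and let pages migrate when the kernel first touches them. A sketch using the same variable names (<algorithm> is already included):

std::copy(h_a.begin(), h_a.end(), d_a);  // CPU writes directly into managed memory
std::copy(h_b.begin(), h_b.end(), d_b);  // pages migrate to the GPU on first kernel access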
972eaf3b004b690be21995ca0ce48cf786508b83.cu
#include <algorithm> #include <cassert> #include <cstdlib> #include <functional> #include <iostream> #include <vector> #include <cuda.h> #include "nvml_monitor.h" #include "gemm_kernel.h" using std::cout; using std::generate; using std::vector; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void matrixMul(const int *a, const int *b, int *c, int N, int M, int K) { // Compute each thread's global row and column index int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; //printf("Kernel Called..."); // Iterate over row, and down column //printf("%d, %d\n", row, col); if(row < N && col < K){ c[row * K + col] = 0; for (int k = 0; k < M; k++) { // Accumulate results for a single element c[row * K + col] += a[row * M + k] * b[k * K + col]; } } } // Check result on the CPU void verify_result(vector<int> &a, vector<int> &b, vector<int> &c, int N, int M, int K) { // For every row... for (int i = 0; i < N; i++) { // For every column... for (int j = 0; j < K; j++) { // For every element in the row-column pair int tmp = 0; for (int k = 0; k < M; k++) { // Accumulate the partial results tmp += a[i * M + k] * b[k * K + j]; } // Check against the CPU result //printf("%d, %d, %d, %d\n", tmp, c[i*K+j], i, j); assert(tmp == c[i * K + j]); } } } int main() { std::string const fname = {"trace_new.csv"}; int dev = 0; NVMLMonThread logger(dev, fname); int N = 1 << 14; int M = 1 << 14; int K = 1 << 14; //Start Monitoring Thraed std::thread threadStart(&NVMLMonThread::log, &logger); size_t size_a = N * M * sizeof(int); size_t size_b = M * K * sizeof(int); size_t size_c = N * K * sizeof(int); // Host vectors vector<int> h_a(N * M); vector<int> h_b(M * K); vector<int> h_c(N * K); // Initialize matrices generate(h_a.begin(), h_a.end(), []() { return rand() % 100; }); generate(h_b.begin(), h_b.end(), []() { return rand() % 100; }); // Allocate device memory logger.caller_state = 1; int *d_a, *d_b, *d_c; gpuErrchk(cudaMallocManaged(&d_a, size_a)); gpuErrchk(cudaMallocManaged(&d_b, size_b)); gpuErrchk(cudaMallocManaged(&d_c, size_c)); // Copy data to the device gpuErrchk(cudaMemcpy(d_a, h_a.data(), size_a, cudaMemcpyHostToDevice)); gpuErrchk(cudaMemcpy(d_b, h_b.data(), size_b, cudaMemcpyHostToDevice)); // Threads per block in one dimension. Using equal number of threads in both dimensions of block. int THREADS = 32; // Number of blocks in each dimension int BLOCKS_X = (K+THREADS-1) / THREADS; int BLOCKS_Y = (N+THREADS-1) / THREADS; // Use dim3 structs for block and grid dimensions dim3 threads(THREADS, THREADS); dim3 blocks(BLOCKS_X, BLOCKS_Y); // Launch kernel printf("Calling Kernel...\n"); logger.caller_state = 2; matrixMul<<<blocks, threads>>>(d_a, d_b, d_c, N, M, K); gpuErrchk(cudaDeviceSynchronize()); logger.caller_state = 3; // Copy back to the host gpuErrchk(cudaMemcpy(h_c.data(), d_c, size_c, cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); // Check result //printf("Verifying Result...\n"); //verify_result(h_a, h_b, h_c, N, M, K); cout << "COMPLETED SUCCESSFULLY\n"; // Free memory on device cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); logger.caller_state = 4; std::thread threadKill(&NVMLMonThread::killThread, &logger); //logger.loop = false; threadStart.join(); threadKill.join(); return 0; }
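The power/utilization trace above comes from NVMLMonThread, defined in the project's nvml_monitor.h (not shown here). For plain wall-clock kernel timing, CUDA events are self-contained; a sketch around the launch, reusing the file's gpuErrchk macro:

cudaEvent_t t0, t1;
gpuErrchk(cudaEventCreate(&t0));
gpuErrchk(cudaEventCreate(&t1));
gpuErrchk(cudaEventRecord(t0));
matrixMul<<<blocks, threads>>>(d_a, d_b, d_c, N, M, K);
gpuErrchk(cudaEventRecord(t1));
gpuErrchk(cudaEventSynchronize(t1));
float ms = 0.0f;
gpuErrchk(cudaEventElapsedTime(&ms, t0, t1));  // elapsed milliseconds between the two records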
1c4ca417eae253fdf34f15b5fcbd31dafd4e05e0.hip
// !!! This is a file automatically generated by hipify!!! #include <memory> #include <gtest/gtest.h> #include <opencv2/opencv.hpp> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <matchbox/device.h> #include <matchbox/feature_extractor.h> #include <matchbox/feature_map.h> #include <matchbox/image.h> #include <bitset> namespace matchbox { namespace testing { namespace { inline cv::Mat CreateMat() { cv::Mat mat(480, 640, CV_8UC1); for (int y = 0; y < mat.rows; ++y) { for (int x = 0; x < mat.cols; ++x) { mat.at<uint8_t>(y, x) = 25 * (y % 9) + x % 23 + x % 3; } } return mat; } inline uint64_t Extract(const cv::Mat& mat, int x, int y) { uint64_t feature = 0; const uint8_t center = mat.at<uint8_t>(y, x); for (int j = -3; j <= 3; ++j) { for (int i = -4; i <= 4; ++i) { uint8_t other = 0; if (y + j >= 0 && y + j < mat.rows && x + i >= 0 && x + i < mat.cols) { other = mat.at<uint8_t>(y + j, x + i); } feature <<= 1; feature |= center >= other; } } return feature; } inline cv::Mat Extract(const cv::Mat& mat) { cv::Mat expected(mat.rows, mat.cols, CV_64FC1); for (int y = 0; y < expected.rows; ++y) { for (int x = 0; x < expected.cols; ++x) { expected.at<uint64_t>(y, x) = Extract(mat, x, y); } } return expected; } } // namespace TEST(FeatureExtractor, Constructor) { std::shared_ptr<Image> image; image = std::make_shared<Image>(); FeatureExtractor extractor(image); ASSERT_EQ(image, extractor.GetImage()); } TEST(FeatureExtractor, Extract) { const cv::Mat mat = CreateMat(); const cv::Mat expected = Extract(mat); std::shared_ptr<Image> image; image = std::make_shared<Image>(mat.cols, mat.rows); const hipMemcpyKind kind = hipMemcpyHostToDevice; CUDA_ASSERT(hipMemcpy(image->GetData(), mat.data, image->GetTotal(), kind)); FeatureMap map; FeatureExtractor extractor(image); extractor.Extract(map); thrust::device_ptr<uint64_t> ptr(map.GetData()); thrust::host_vector<uint64_t> found(ptr, ptr + map.GetTotal()); const uint64_t* data = reinterpret_cast<const uint64_t*>(expected.data); for (int i = 0; i < (int)found.size(); ++i) { if (data[i] != found[i]) { const int x = i % image->GetWidth(); const int y = i / image->GetWidth(); std::cout << i << " (" << x << ", " << y<< "): " << data[i] << " " << found[i] << std::endl; } ASSERT_EQ(data[i], found[i]); } } } // namespace testing } // namespace matchbox
1c4ca417eae253fdf34f15b5fcbd31dafd4e05e0.cu
#include <memory> #include <gtest/gtest.h> #include <opencv2/opencv.hpp> #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <matchbox/device.h> #include <matchbox/feature_extractor.h> #include <matchbox/feature_map.h> #include <matchbox/image.h> #include <bitset> namespace matchbox { namespace testing { namespace { inline cv::Mat CreateMat() { cv::Mat mat(480, 640, CV_8UC1); for (int y = 0; y < mat.rows; ++y) { for (int x = 0; x < mat.cols; ++x) { mat.at<uint8_t>(y, x) = 25 * (y % 9) + x % 23 + x % 3; } } return mat; } inline uint64_t Extract(const cv::Mat& mat, int x, int y) { uint64_t feature = 0; const uint8_t center = mat.at<uint8_t>(y, x); for (int j = -3; j <= 3; ++j) { for (int i = -4; i <= 4; ++i) { uint8_t other = 0; if (y + j >= 0 && y + j < mat.rows && x + i >= 0 && x + i < mat.cols) { other = mat.at<uint8_t>(y + j, x + i); } feature <<= 1; feature |= center >= other; } } return feature; } inline cv::Mat Extract(const cv::Mat& mat) { cv::Mat expected(mat.rows, mat.cols, CV_64FC1); for (int y = 0; y < expected.rows; ++y) { for (int x = 0; x < expected.cols; ++x) { expected.at<uint64_t>(y, x) = Extract(mat, x, y); } } return expected; } } // namespace TEST(FeatureExtractor, Constructor) { std::shared_ptr<Image> image; image = std::make_shared<Image>(); FeatureExtractor extractor(image); ASSERT_EQ(image, extractor.GetImage()); } TEST(FeatureExtractor, Extract) { const cv::Mat mat = CreateMat(); const cv::Mat expected = Extract(mat); std::shared_ptr<Image> image; image = std::make_shared<Image>(mat.cols, mat.rows); const cudaMemcpyKind kind = cudaMemcpyHostToDevice; CUDA_ASSERT(cudaMemcpy(image->GetData(), mat.data, image->GetTotal(), kind)); FeatureMap map; FeatureExtractor extractor(image); extractor.Extract(map); thrust::device_ptr<uint64_t> ptr(map.GetData()); thrust::host_vector<uint64_t> found(ptr, ptr + map.GetTotal()); const uint64_t* data = reinterpret_cast<const uint64_t*>(expected.data); for (int i = 0; i < (int)found.size(); ++i) { if (data[i] != found[i]) { const int x = i % image->GetWidth(); const int y = i / image->GetWidth(); std::cout << i << " (" << x << ", " << y<< "): " << data[i] << " " << found[i] << std::endl; } ASSERT_EQ(data[i], found[i]); } } } // namespace testing } // namespace matchbox
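The 9x7 census window in Extract packs 63 comparison bits, which is why each signature fits a uint64_t (stored through CV_64FC1, the only 64-bit OpenCV depth, and reinterpreted on read). Census features are conventionally scored by Hamming distance downstream; a sketch of that comparison, not part of this test:

static_assert(9 * 7 <= 64, "census signature must fit in a 64-bit word");

__device__ inline int census_distance(uint64_t a, uint64_t b) {
  return __popcll(a ^ b);  // Hamming distance between two census signatures
}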
478d929bd2e2e8753937be40dd2a73268d44baff.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************************************** * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. * SPDX-License-Identifier: BSD-3-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /** The example demonstrates how to reduce one of the operands of the GEMM along the k-dimension when computing GEMM. So the output also contains either an Mx1 or 1xN vector. It only works with Ampere 16x8x16 FP16/BF16 tensor cores, though it is not difficult to apply to other Turing/Ampere tensor core instructions.
Most of the reduction is done in gemm/warp level, see gemm/warp/mma_with_reduction_tensor_op.h A few bit of reduction is done in the epilogue before storing the vector, see epilogue/threadblock/epilogue_gemm_k_reduction.h */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_with_k_reduction.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementInputB = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementOutput = cutlass::bfloat16_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // Layout of the output vector using LayoutGemmKReduction = cutlass::layout::PitchLinear; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 4; // Reduce A or B operand along the K dimension constexpr bool ReduceKForA = true; // Alignment of A operand constexpr int AlignmentA = 8; // Alignment of B operand constexpr int AlignmentB = 8; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; using Gemm = typename cutlass::gemm::device::GemmWithKReduction< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, ReduceKForA, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, AlignmentA, AlignmentB, cutlass::arch::OpMultiplyAdd, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone >; // Below is the reduction kernel used in the case of parallel split-k using ReduceGemmSplitKShape = cutlass::MatrixShape<4, 64>;; using ReduceOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, ElementOutput, EpilogueOp::kCount >; using ReduceGemmSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceGemmSplitKShape, EpilogueOp, ReduceOp >; using ReduceGemmSplitK = cutlass::reduction::device::ReduceSplitK<ReduceGemmSplitKKernel>; using ReduceVectorSplitKShape = cutlass::MatrixShape<1, 256>;; // This code section describes the epilogue part of the kernel, we use default value using DummyEpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, cutlass::epilogue::thread::ScaleType::Nothing>; using ReduceVectorSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceVectorSplitKShape, DummyEpilogueOp, ReduceOp >; using ReduceVectorSplitK = cutlass::reduction::device::ReduceSplitK<ReduceVectorSplitKKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int split_k_slices; bool parallel_split_k; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), problem_size(1024, 1024, 1024), split_k_slices(1), parallel_split_k(false), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(-1), beta(-1), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } /// Updates input and filter sizes void update( cutlass::gemm::GemmCoord problem_size, int split_k_slices, bool parallel_split_k) { this->problem_size = problem_size; this->split_k_slices = split_k_slices; this->parallel_split_k = parallel_split_k; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("parallel-split-k")) { parallel_split_k = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "28_ampere_gemm_bias_fusion example\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M\n" << " --n=<int> GEMM N\n" << " --k=<int> GEMM K\n" << " --split-k-slices=<int> Split K Slices\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --parallel-split-k If set (true), use parallel split K\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several problem sizes.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/23_ampere_gemm_bias_fusion_example/ampere_gemm_bias_fusion --m=1024 --n=1024 --k=1024 \n\n"; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; cutlass::Status status; cutlass::Status reference_check; hipError_t error; Result(): runtime_ms(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(hipSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "ID,M,N,K,SplitK-Slices,Parallel-SplitK,Runtime"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "gemm_" << idx << "," << options.problem_size.m() << "," << options.problem_size.n() << "," << options.problem_size.k() << "," << options.split_k_slices << "," << options.parallel_split_k << "," << runtime_ms ; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile(Options const &options) { Result result; // Initialize tensors using 
CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.problem_size.mk()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.problem_size.kn()); // Create tensor C with dimensions 1x1x1xk which is the bias vector cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.problem_size.mn()); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.problem_size.mn()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.problem_size.mn()); int reduce_vector_length = ReduceKForA ? options.problem_size.m() : options.problem_size.n(); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_reduction({reduce_vector_length, 1}); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_ref_reduction({reduce_vector_length, 1}); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1997, ElementInputA(2), ElementInputA(-2), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 2003, ElementInputB(2), ElementInputB(-2), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 2017, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_reduction.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_reduction.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); tensor_reduction.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = options.parallel_split_k ? ElementComputeEpilogue(1) : ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = options.parallel_split_k ? ElementComputeEpilogue(0) : ElementComputeEpilogue(options.beta); cutlass::gemm::GemmUniversalMode mode = options.parallel_split_k ? cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel : cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = options.split_k_slices; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments( mode, options.problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to tensor A on device tensor_b.device_ref().data(), // <- reference to tensor B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_reduction.device_ref().data(), // <- reference to reduction tensor on device options.problem_size.m() * options.problem_size.k(), options.problem_size.n() * options.problem_size.k(), options.problem_size.m() * options.problem_size.n(), options.problem_size.m() * options.problem_size.n(), reduce_vector_length, tensor_a.layout().stride(0), tensor_b.layout().stride(0), tensor_c.layout().stride(0), tensor_d.layout().stride(0), tensor_reduction.layout().stride(0)); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not result.status = gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); // Initialize CUTLASS kernel with arguments and workspace pointer result.status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // Launch initialized CUTLASS kernel result.status = gemm_op(); CUTLASS_CHECK(result.status); if (options.parallel_split_k && batch_count > 1) { // reduce gemm ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta); int splitk_gemm_stride = options.problem_size.m(); cutlass::layout::RowMajor splitk_gemm_layout(splitk_gemm_stride); void * workspace_gemm_ptr = workspace.get(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_gemm_tensorref(static_cast<ElementOutput *>(workspace_gemm_ptr), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_d_tensorref(tensor_d.device_ref().data(), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_c_tensorref(tensor_c.device_ref().data(), splitk_gemm_layout); typename ReduceGemmSplitK::Arguments reduce_gemm_splitk_arguments{ cutlass::MatrixCoord(options.problem_size.n(), options.problem_size.m()), batch_count, size_t(options.problem_size.m() * options.problem_size.n()), workspace_gemm_tensorref, tensor_d_tensorref, tensor_c_tensorref, {alpha, beta} }; ReduceGemmSplitK reduce_gemm_splitk_op; result.status = reduce_gemm_splitk_op.initialize(reduce_gemm_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_gemm_splitk_op(); CUTLASS_CHECK(result.status); // reduce k vector cutlass::layout::RowMajor splitk_vector_layout(reduce_vector_length); ElementOutput *workspace_vector_ptr = static_cast<ElementOutput *>(workspace_gemm_ptr) + batch_count * options.problem_size.m() * options.problem_size.n(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_vector_tensorref(workspace_vector_ptr, splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_reduction_tensorref(tensor_reduction.device_ref().data(), splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_nullptr_tensorref(nullptr, 
splitk_vector_layout); typename ReduceVectorSplitK::Arguments reduce_vector_splitk_arguments( cutlass::MatrixCoord(1, reduce_vector_length), batch_count, size_t(reduce_vector_length), workspace_vector_tensorref, tensor_reduction_tensorref, tensor_nullptr_tensorref, {1.0f, 0.0f}); ReduceVectorSplitK reduce_vector_splitk_op; result.status = reduce_vector_splitk_op.initialize(reduce_vector_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_vector_splitk_op(); CUTLASS_CHECK(result.status); } // // Create instantiation for device reference conv kernel // if (options.reference_check) { // Launch device reference to compute strictly the product A * B cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( options.problem_size, ElementComputeEpilogue(options.alpha), tensor_a.device_ref(), tensor_b.device_ref(), ElementComputeEpilogue(options.beta), tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish hipDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); tensor_reduction.sync_host(); // Reduce K in host code if (ReduceKForA) { for (int m = 0; m < options.problem_size.m(); ++m) { for (int k = 0; k < options.problem_size.k(); ++k) { tensor_ref_reduction.at({m, 0}) += tensor_a.at(cutlass::MatrixCoord(m, k)); } } } else { for (int k = 0; k < options.problem_size.k(); ++k) { for (int n = 0; n < options.problem_size.n(); ++n) { tensor_ref_reduction.at({n, 0}) += tensor_b.at(cutlass::MatrixCoord(k, n)); } } } // Check if output from CUTLASS kernel and reference kernel are equal or not bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); pass &= cutlass::reference::host::TensorEquals(tensor_ref_reduction.host_view(), tensor_reduction.host_view()); if (!pass) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "23_ampere_gemm_operand_reduction_fusion" << options.problem_size.m() << "x" << options.problem_size.n() << "x" << options.problem_size.k() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "A = \n" << tensor_a.host_view() << "\n\n" << "B = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference D = \n" << tensor_ref_d.host_view() << "\n\n"; output_workspace << "Reference reduction vector = \n" << tensor_ref_reduction.host_view() << "\n\n"; } output_workspace << "Computed D = \n" << tensor_d.host_view() << std::endl; output_workspace << "Computed reduction vector = \n" << tensor_reduction.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { hipEvent_t events[2]; for (auto & event : events) { result.error = hipEventCreate(&event); if (result.error != hipSuccess) { std::cerr << "hipEventCreate() failed: " << hipGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. 
result.error = hipEventRecord(events[0]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = hipEventRecord(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventRecord() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = hipEventSynchronize(events[1]); if (result.error != hipSuccess) { std::cerr << "hipEventSynchronize() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = hipEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != hipSuccess) { std::cerr << "cudaEventElapsed() failed: " << hipGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); // Cleanup for (auto event : events) { (void)hipEventDestroy(event); } } return result; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } hipDeviceProp_t props; CUDA_CHECK(hipGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers struct Benchmark { int m, n, k, split_k_slices, parallel_split_k; } problem_sizes[] = { {4096, 6144, 4096, 1, false}, }; Result::print_header(std::cout, options) << "\n"; int idx = 1; for (auto const &problem_size : problem_sizes) { options.update({problem_size.m, problem_size.n, problem_size.k}, problem_size.split_k_slices, problem_size.parallel_split_k); Result result = profile(options); result.print(std::cout, idx, options) << "\n"; ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << "\n"; return -1; } Result result = profile(options); Result::print_header(std::cout, options) << "\n"; result.print(std::cout, 1, options) << "\n"; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
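For reference, the fused kernel configured above produces two outputs per launch: the ordinary GEMM result and the k-reduction of one operand. With ReduceKForA == true, as set here,

\[
D = \alpha\, A B + \beta\, C, \qquad r_m = \sum_{k=0}^{K-1} A_{m,k}, \quad m = 0, \dots, M-1,
\]

where \(D\) is \(M \times N\) and \(r\) is the Mx1 reduction vector; with ReduceKForA == false the kernel instead produces the 1xN vector \(r_n = \sum_{k} B_{k,n}\). This is exactly what the host-side loops in profile() recompute for the reference check.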
478d929bd2e2e8753937be40dd2a73268d44baff.cu
/***************************************************************************************************
 * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 **************************************************************************************************/
/**
The example demonstrates how to reduce one of the operands of the GEMM along the k-dimension while
computing the GEMM, so the output also contains either an Mx1 or a 1xN vector. It only works with
Ampere 16x8x16 FP16/BF16 tensor cores, though it is not difficult to apply to other Turing/Ampere
tensor core instructions.
Most of the reduction is done in gemm/warp level, see gemm/warp/mma_with_reduction_tensor_op.h A few bit of reduction is done in the epilogue before storing the vector, see epilogue/threadblock/epilogue_gemm_k_reduction.h */ #include <iostream> #include <fstream> #include <sstream> #include "cutlass/cutlass.h" #include "cutlass/gemm/device/gemm_with_k_reduction.h" #include "cutlass/gemm/kernel/default_gemm_with_k_reduction.h" #include "cutlass/reduction/device/reduce_split_k.h" #include "cutlass/reduction/kernel/reduce_split_k.h" #include "cutlass/reduction/thread/reduction_operators.h" #include "cutlass/matrix_coord.h" #include "cutlass/util/command_line.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/device/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_copy.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/util/reference/device/convolution.h" #include "helper.h" // The code section below describes datatype for input, output tensors and computation between // elements using ElementAccumulator = float; // Data type of accumulator using ElementComputeEpilogue = ElementAccumulator; // Data type of epilogue computation using ElementInputA = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementInputB = cutlass::bfloat16_t; // Data type of elements in input tensor using ElementOutput = cutlass::bfloat16_t; // Data type of elements in output tensor using LayoutInputA = cutlass::layout::ColumnMajor; using LayoutInputB = cutlass::layout::RowMajor; using LayoutOutput = cutlass::layout::ColumnMajor; // Layout of the output vector using LayoutGemmKReduction = cutlass::layout::PitchLinear; // This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM using MMAOp = cutlass::arch::OpClassTensorOp; // This code section describes CUDA SM architecture number using SmArch = cutlass::arch::Sm80; // This code section describes the tile size a thread block will compute using ThreadblockShape = cutlass::gemm::GemmShape<128, 128, 32>; // Threadblock tile shape // This code section describes tile size a warp will compute using WarpShape = cutlass::gemm::GemmShape<64, 64, 32>; // Warp tile shape // This code section describes the size of MMA op using InstructionShape = cutlass::gemm::GemmShape<16, 8, 16>; // TensorCore instruction shape // This code section describes how threadblocks are scheduled on GPU using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<8>; // Number of pipelines you want to use constexpr int NumStages = 4; // Reduce A or B operand along the K dimension constexpr bool ReduceKForA = true; // Alignment of A operand constexpr int AlignmentA = 8; // Alignment of B operand constexpr int AlignmentB = 8; // This code section describes the epilogue part of the kernel, we use default value using EpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. 
ElementAccumulator, // Data type of accumulator ElementComputeEpilogue>; using Gemm = typename cutlass::gemm::device::GemmWithKReduction< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementAccumulator, MMAOp, ReduceKForA, SmArch, ThreadblockShape, WarpShape, InstructionShape, EpilogueOp, SwizzleThreadBlock, NumStages, AlignmentA, AlignmentB, cutlass::arch::OpMultiplyAdd, cutlass::ComplexTransform::kNone, cutlass::ComplexTransform::kNone >; // Below is the reduction kernel used in the case of parallel split-k using ReduceGemmSplitKShape = cutlass::MatrixShape<4, 64>;; using ReduceOp = cutlass::reduction::thread::ReduceAdd< ElementAccumulator, ElementOutput, EpilogueOp::kCount >; using ReduceGemmSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceGemmSplitKShape, EpilogueOp, ReduceOp >; using ReduceGemmSplitK = cutlass::reduction::device::ReduceSplitK<ReduceGemmSplitKKernel>; using ReduceVectorSplitKShape = cutlass::MatrixShape<1, 256>;; // This code section describes the epilogue part of the kernel, we use default value using DummyEpilogueOp = cutlass::epilogue::thread::LinearCombination< ElementOutput, // Data type of output matrix. 128 / cutlass::sizeof_bits<ElementOutput>::value, // The number of elements per vectorized. // memory access. This becomes the vector width of // math instructions in the epilogue too. ElementAccumulator, // Data type of accumulator ElementComputeEpilogue, cutlass::epilogue::thread::ScaleType::Nothing>; using ReduceVectorSplitKKernel = cutlass::reduction::kernel::ReduceSplitK< ReduceVectorSplitKShape, DummyEpilogueOp, ReduceOp >; using ReduceVectorSplitK = cutlass::reduction::device::ReduceSplitK<ReduceVectorSplitKKernel>; ///////////////////////////////////////////////////////////////////////////////////////////////// // Command line options parsing struct Options { bool help; cutlass::gemm::GemmCoord problem_size; int split_k_slices; bool parallel_split_k; bool reference_check; bool measure_performance; int iterations; bool save_workspace; ElementComputeEpilogue alpha; ElementComputeEpilogue beta; bool benchmark; std::string tag; Options(): help(false), problem_size(1024, 1024, 1024), split_k_slices(1), parallel_split_k(false), reference_check(true), measure_performance(false), iterations(20), save_workspace(false), alpha(-1), beta(-1), benchmark(false) { } // Verify the problem size is compatible with the CUTLASS Convolution implementation. bool valid() { // // CUTLASS attempts to load 128b vectors of cutlass::half_t (F16) elements. Consequently, // all pointers, strides, and tensor extents must be divisible by 8 elements. 
// int const kAlignment = 8; if ((problem_size.m() % kAlignment) || (problem_size.n() % kAlignment) || (problem_size.k() % kAlignment)) { // misaligned tensors return false; } return true; } /// Updates input and filter sizes void update( cutlass::gemm::GemmCoord problem_size, int split_k_slices, bool parallel_split_k) { this->problem_size = problem_size; this->split_k_slices = split_k_slices; this->parallel_split_k = parallel_split_k; } // Parses the command line void parse(int argc, char const **args) { cutlass::CommandLine cmd(argc, args); if (cmd.check_cmd_line_flag("help")) { help = true; } if (cmd.check_cmd_line_flag("parallel-split-k")) { parallel_split_k = true; } if (cmd.check_cmd_line_flag("ref-check")) { reference_check = true; } if (cmd.check_cmd_line_flag("perf-check")) { measure_performance = true; } if (cmd.check_cmd_line_flag("save-workspace")) { save_workspace = true; } if (cmd.check_cmd_line_flag("benchmark")) { benchmark = true; } cmd.get_cmd_line_argument("m", problem_size.m()); cmd.get_cmd_line_argument("n", problem_size.n()); cmd.get_cmd_line_argument("k", problem_size.k()); cmd.get_cmd_line_argument("split-k-slices", split_k_slices); cmd.get_cmd_line_argument("alpha", alpha); cmd.get_cmd_line_argument("beta", beta); cmd.get_cmd_line_argument("iterations", iterations); cmd.get_cmd_line_argument("tag", tag); } /// Prints the usage statement. std::ostream & print_usage(std::ostream &out) const { out << "28_ampere_gemm_bias_fusion example\n\n" << "Options:\n\n" << " --help If specified, displays this usage statement.\n\n" << " --m=<int> GEMM M\n" << " --n=<int> GEMM N\n" << " --k=<int> GEMM K\n" << " --split-k-slices=<int> Split K Slices\n" << " --alpha=<float> Epilogue scalar alpha\n" << " --beta=<float> Epilogue scalar beta\n\n" << " --parallel-split-k If set (true), use parallel split K\n" << " --ref-check If set (true), reference check on the host is computed\n" << " --perf-check If set (true), performance is measured.\n" << " --benchmark If set (true), performance benchmarking on several problem sizes.\n" << " --iterations=<int> Number of profiling iterations to perform.\n" << " --save-workspace If set, workspace is written to a text file.\n" << " --tag=<string> String to replicate across the first column in the results table\n"; out << "\n\nExamples:\n\n" << "$ ./examples/23_ampere_gemm_bias_fusion_example/ampere_gemm_bias_fusion --m=1024 --n=1024 --k=1024 \n\n"; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// struct Result { double runtime_ms; cutlass::Status status; cutlass::Status reference_check; cudaError_t error; Result(): runtime_ms(0), status(cutlass::Status::kSuccess), reference_check(cutlass::Status::kInvalid), error(cudaSuccess) { } static std::ostream & print_header(std::ostream &out, Options const &options) { if (!options.tag.empty()) { out << "Name,"; } out << "ID,M,N,K,SplitK-Slices,Parallel-SplitK,Runtime"; return out; } std::ostream & print(std::ostream &out, int idx, Options const &options) { if (!options.tag.empty()) { out << options.tag << ","; } out << "gemm_" << idx << "," << options.problem_size.m() << "," << options.problem_size.n() << "," << options.problem_size.k() << "," << options.split_k_slices << "," << options.parallel_split_k << "," << runtime_ms ; return out; } }; ///////////////////////////////////////////////////////////////////////////////////////////////// /// Runs one benchmark Result profile(Options const &options) { Result result; // Initialize tensors using 
CUTLASS helper functions cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(options.problem_size.mk()); cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(options.problem_size.kn()); // Create tensor C with dimensions 1x1x1xk which is the bias vector cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(options.problem_size.mn()); // Create tensor D used to store output from CUTLASS kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(options.problem_size.mn()); // Create matrix D with dimensions M x N used to store output from reference // kernel cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(options.problem_size.mn()); int reduce_vector_length = ReduceKForA ? options.problem_size.m() : options.problem_size.n(); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_reduction({reduce_vector_length, 1}); cutlass::HostTensor<ElementOutput, LayoutGemmKReduction> tensor_ref_reduction({reduce_vector_length, 1}); // Fill input and output matrices on host using CUTLASS helper functions cutlass::reference::host::TensorFillRandomUniform( tensor_a.host_view(), 1997, ElementInputA(2), ElementInputA(-2), 0); // <- Fill tensor A on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_b.host_view(), 2003, ElementInputB(2), ElementInputB(-2), 0); // <- Fill tensor B on host with uniform-distribution random data cutlass::reference::host::TensorFillRandomUniform( tensor_c.host_view(), 2017, ElementOutput(2), ElementOutput(-2), 0); // <- Fill matrix C on host with uniform-distribution random data cutlass::reference::host::TensorFill( tensor_d.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros cutlass::reference::host::TensorFill( tensor_reduction.host_view()); // <- fill matrix D on host with zeros cutlass::reference::host::TensorFill( tensor_ref_reduction.host_view()); // <- fill matrix D for reference on host with zeros // Copy data from host to GPU tensor_a.sync_device(); tensor_b.sync_device(); tensor_c.sync_device(); tensor_d.sync_device(); tensor_ref_d.sync_device(); tensor_reduction.sync_device(); // Initialize alpha for dot product computation ElementComputeEpilogue alpha = options.parallel_split_k ? ElementComputeEpilogue(1) : ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = options.parallel_split_k ? ElementComputeEpilogue(0) : ElementComputeEpilogue(options.beta); cutlass::gemm::GemmUniversalMode mode = options.parallel_split_k ? cutlass::gemm::GemmUniversalMode::kGemmSplitKParallel : cutlass::gemm::GemmUniversalMode::kGemm; int batch_count = options.split_k_slices; // Create a tuple of gemm kernel arguments. 
This is later passed as arguments to launch // instantiated CUTLASS kernel typename Gemm::Arguments arguments( mode, options.problem_size, batch_count, {alpha, beta}, tensor_a.device_ref().data(), // <- reference to tensor A on device tensor_b.device_ref().data(), // <- reference to tensor B on device tensor_c.device_ref().data(), // <- reference to matrix C on device tensor_d.device_ref().data(), // <- reference to matrix D on device tensor_reduction.device_ref().data(), // <- reference to reduction tensor on device options.problem_size.m() * options.problem_size.k(), options.problem_size.n() * options.problem_size.k(), options.problem_size.m() * options.problem_size.n(), options.problem_size.m() * options.problem_size.n(), reduce_vector_length, tensor_a.layout().stride(0), tensor_b.layout().stride(0), tensor_c.layout().stride(0), tensor_d.layout().stride(0), tensor_reduction.layout().stride(0)); // Instantiate CUTLASS kernel depending on templates Gemm gemm_op; // Using the arguments, query for extra workspace required for matrix multiplication computation size_t workspace_size = Gemm::get_workspace_size(arguments); // Allocate workspace memory cutlass::device_memory::allocation<uint8_t> workspace(workspace_size); // Check the problem size is supported or not result.status = gemm_op.can_implement(arguments); CUTLASS_CHECK(result.status); // Initialize CUTLASS kernel with arguments and workspace pointer result.status = gemm_op.initialize(arguments, workspace.get()); CUTLASS_CHECK(result.status); // Launch initialized CUTLASS kernel result.status = gemm_op(); CUTLASS_CHECK(result.status); if (options.parallel_split_k && batch_count > 1) { // reduce gemm ElementComputeEpilogue alpha = ElementComputeEpilogue(options.alpha); ElementComputeEpilogue beta = ElementComputeEpilogue(options.beta); int splitk_gemm_stride = options.problem_size.m(); cutlass::layout::RowMajor splitk_gemm_layout(splitk_gemm_stride); void * workspace_gemm_ptr = workspace.get(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_gemm_tensorref(static_cast<ElementOutput *>(workspace_gemm_ptr), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_d_tensorref(tensor_d.device_ref().data(), splitk_gemm_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_c_tensorref(tensor_c.device_ref().data(), splitk_gemm_layout); typename ReduceGemmSplitK::Arguments reduce_gemm_splitk_arguments{ cutlass::MatrixCoord(options.problem_size.n(), options.problem_size.m()), batch_count, size_t(options.problem_size.m() * options.problem_size.n()), workspace_gemm_tensorref, tensor_d_tensorref, tensor_c_tensorref, {alpha, beta} }; ReduceGemmSplitK reduce_gemm_splitk_op; result.status = reduce_gemm_splitk_op.initialize(reduce_gemm_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_gemm_splitk_op(); CUTLASS_CHECK(result.status); // reduce k vector cutlass::layout::RowMajor splitk_vector_layout(reduce_vector_length); ElementOutput *workspace_vector_ptr = static_cast<ElementOutput *>(workspace_gemm_ptr) + batch_count * options.problem_size.m() * options.problem_size.n(); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> workspace_vector_tensorref(workspace_vector_ptr, splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_reduction_tensorref(tensor_reduction.device_ref().data(), splitk_vector_layout); cutlass::TensorRef<ElementOutput, cutlass::layout::RowMajor> tensor_nullptr_tensorref(nullptr, 
splitk_vector_layout); typename ReduceVectorSplitK::Arguments reduce_vector_splitk_arguments( cutlass::MatrixCoord(1, reduce_vector_length), batch_count, size_t(reduce_vector_length), workspace_vector_tensorref, tensor_reduction_tensorref, tensor_nullptr_tensorref, {1.0f, 0.0f}); ReduceVectorSplitK reduce_vector_splitk_op; result.status = reduce_vector_splitk_op.initialize(reduce_vector_splitk_arguments); CUTLASS_CHECK(result.status); result.status = reduce_vector_splitk_op(); CUTLASS_CHECK(result.status); } // // Create instantiation for device reference conv kernel // if (options.reference_check) { // Launch device reference to compute strictly the product A * B cutlass::reference::device::Gemm< ElementInputA, LayoutInputA, ElementInputB, LayoutInputB, ElementOutput, LayoutOutput, ElementComputeEpilogue, ElementAccumulator> gemm_device; gemm_device ( options.problem_size, ElementComputeEpilogue(options.alpha), tensor_a.device_ref(), tensor_b.device_ref(), ElementComputeEpilogue(options.beta), tensor_c.device_ref(), tensor_ref_d.device_ref() ); // Wait for kernels to finish cudaDeviceSynchronize(); // Copy output data from CUTLASS and reference kernel to host for comparison tensor_d.sync_host(); tensor_ref_d.sync_host(); tensor_reduction.sync_host(); // Reduce K in host code if (ReduceKForA) { for (int m = 0; m < options.problem_size.m(); ++m) { for (int k = 0; k < options.problem_size.k(); ++k) { tensor_ref_reduction.at({m, 0}) += tensor_a.at(cutlass::MatrixCoord(m, k)); } } } else { for (int k = 0; k < options.problem_size.k(); ++k) { for (int n = 0; n < options.problem_size.n(); ++n) { tensor_ref_reduction.at({n, 0}) += tensor_b.at(cutlass::MatrixCoord(k, n)); } } } // Check if output from CUTLASS kernel and reference kernel are equal or not bool pass = cutlass::reference::host::TensorEquals(tensor_d.host_view(), tensor_ref_d.host_view()); pass &= cutlass::reference::host::TensorEquals(tensor_ref_reduction.host_view(), tensor_reduction.host_view()); if (!pass) { result.reference_check = cutlass::Status::kErrorInternal; std::cout << "ERROR - results miscompared.\n"; } else { result.reference_check = cutlass::Status::kSuccess; std::cout << "Passed.\n"; } } else { result.reference_check = cutlass::Status::kInvalid; } if (options.save_workspace) { std::stringstream ss; ss << "23_ampere_gemm_operand_reduction_fusion" << options.problem_size.m() << "x" << options.problem_size.n() << "x" << options.problem_size.k() << ".dat"; std::ofstream output_workspace(ss.str()); output_workspace << "A = \n" << tensor_a.host_view() << "\n\n" << "B = \n" << tensor_b.host_view() << "\n\n"; if (options.reference_check) { output_workspace << "Reference D = \n" << tensor_ref_d.host_view() << "\n\n"; output_workspace << "Reference reduction vector = \n" << tensor_ref_reduction.host_view() << "\n\n"; } output_workspace << "Computed D = \n" << tensor_d.host_view() << std::endl; output_workspace << "Computed reduction vector = \n" << tensor_reduction.host_view() << std::endl; std::cout << "Results written to '" << ss.str() << "'." << std::endl; } // // Performance measurement // if (options.measure_performance) { cudaEvent_t events[2]; for (auto & event : events) { result.error = cudaEventCreate(&event); if (result.error != cudaSuccess) { std::cerr << "cudaEventCreate() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } } // Record an event at the start of a series of convolution operations. 
result.error = cudaEventRecord(events[0]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Launch a sequence of implicit GEMM operations on the device for (int iteration = 0; iteration < options.iterations; ++iteration) { result.status = gemm_op(); CUTLASS_CHECK(result.status); } // Record an event when the convolutions have been launched. result.error = cudaEventRecord(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventRecord() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Wait for work on the device to complete. result.error = cudaEventSynchronize(events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventSynchronize() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Measure elapsed runtime float runtime_ms = 0; result.error = cudaEventElapsedTime(&runtime_ms, events[0], events[1]); if (result.error != cudaSuccess) { std::cerr << "cudaEventElapsed() failed: " << cudaGetErrorString(result.error) << std::endl; return result; } // Print average runtime and GFLOPs. result.runtime_ms = double(runtime_ms) / double(options.iterations); // Cleanup for (auto event : events) { (void)cudaEventDestroy(event); } } return result; } int main(int argc, char const **args) { bool notSupported = false; // Ampere Tensor Core operations exposed with mma.sync are first available in CUDA 11.0. // // CUTLASS must be compiled with CUDA 11 Toolkit to run Conv2dFprop examples. if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 0))) { std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.0 Toolkit or later." << std::endl; notSupported = true; } cudaDeviceProp props; CUDA_CHECK(cudaGetDeviceProperties(&props, 0)); if (!(props.major >= 8)) { std::cerr << "Ampere Tensor Ops must be run on a machine with compute capability at least 80." << std::endl; notSupported = true; } if (notSupported) { return 0; } Options options; options.parse(argc, args); if (options.help) { options.print_usage(std::cout) << std::endl; return 0; } if (options.benchmark) { // Benchmark several layers struct Benchmark { int m, n, k, split_k_slices, parallel_split_k; } problem_sizes[] = { {4096, 6144, 4096, 1, false}, }; Result::print_header(std::cout, options) << "\n"; int idx = 1; for (auto const &problem_size : problem_sizes) { options.update({problem_size.m, problem_size.n, problem_size.k}, problem_size.split_k_slices, problem_size.parallel_split_k); Result result = profile(options); result.print(std::cout, idx, options) << "\n"; ++idx; } } else { // Execute one problem size if (!options.valid()) { std::cerr << "Invalid problem." << "\n"; return -1; } Result result = profile(options); Result::print_header(std::cout, options) << "\n"; result.print(std::cout, 1, options) << "\n"; } return 0; } /////////////////////////////////////////////////////////////////////////////////////////////////
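In parallel split-K mode (kGemmSplitKParallel with batch_count > 1), the main kernel writes batch_count partial products into the workspace and the two ReduceSplitK launches above sum them, applying alpha and beta only once at the end. A conceptual host-side sketch of the GEMM reduction follows; it is illustrative only — the real reduction runs on the device, workspace_gemm is a hypothetical stand-in for the CUTLASS workspace layout, and the example's real element type is bfloat16 with float accumulation:

// Conceptual host-side equivalent of the ReduceGemmSplitK launch above.
void reduce_split_k_host(const float* workspace_gemm, const float* C,
                         float* D, int M, int N, int batch_count,
                         float alpha, float beta)
{
  for (int idx = 0; idx < M * N; ++idx) {
    float acc = 0.0f;
    for (int s = 0; s < batch_count; ++s)       // one partial result per K-slice
      acc += workspace_gemm[s * M * N + idx];   // partial A*B from slice s
    D[idx] = alpha * acc + beta * C[idx];       // epilogue applied once, at the end
  }
}

The Mx1/1xN reduction vector is combined across slices the same way by the second ReduceSplitK launch, with scale factors {1.0f, 0.0f}.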
edba0490595e4934eab6db962c38c11d033e8ee8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Quantize scaled visibility coordinates onto an oversampled grid: each
// coordinate is split into a main grid cell and a GCF_GRID-times finer
// sub-cell, packed as main*GCF_GRID + sub.
__global__ void vis2ints(double scale, double2 *vis_in, int2 *vis_out, int npts)
{
  // Grid-stride loop: works for any launch configuration.
  for (int q = threadIdx.x + blockIdx.x * blockDim.x; q < npts;
       q += gridDim.x * blockDim.x) {
    double2 inn = vis_in[q];
    inn.x *= scale;
    inn.y *= scale;
    int main_y = floor(inn.y);
    int sub_y  = floor(GCF_GRID * (inn.y - main_y));
    int main_x = floor(inn.x);
    int sub_x  = floor(GCF_GRID * (inn.x - main_x));
    vis_out[q].x = main_x * GCF_GRID + sub_x;
    vis_out[q].y = main_y * GCF_GRID + sub_y;
  }
}
edba0490595e4934eab6db962c38c11d033e8ee8.cu
#include "includes.h" __global__ void vis2ints(double scale, double2 *vis_in, int2* vis_out, int npts) { for (int q=threadIdx.x+blockIdx.x*blockDim.x; q<npts; q+=gridDim.x*blockDim.x) { double2 inn = vis_in[q]; inn.x *= scale; inn.y *= scale; int main_y = floor(inn.y); int sub_y = floor(GCF_GRID*(inn.y-main_y)); int main_x = floor(inn.x); int sub_x = floor(GCF_GRID*(inn.x-main_x)); vis_out[q].x = main_x*GCF_GRID+sub_x; vis_out[q].y = main_y*GCF_GRID+sub_y; } }
19c1f0ca87e8bd012288d123c04f98a908276841.hip
// !!! This is a file automatically generated by hipify!!! #include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/polybenchUtilFuncts.h" #define ERROR_THRESHOLD 1.0 #define GPU_DEVICE 1 # define NI 1024 # define NJ 1024 # define NK 1024 # define NL 1024 typedef float DATA_TYPE; void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A [i * NI + j] = ((DATA_TYPE)i * j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B [i * NK + j] = ((DATA_TYPE)i * (j + 1)) / NJ; } } for (i = 0; i < NL; i++) { for (j = 0; j < NJ; j++) { C [i * NL + j] = ((DATA_TYPE)i * (j + 3)) / NL; } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { D [i * NL + j] = ((DATA_TYPE)i * (j + 2)) / NK; } } } void compareResults(DATA_TYPE *E, DATA_TYPE *E_GPU) { int i, j, fail; fail = 0; for (i = 0; i < NL; i++) { for (j = 0; j < NI; j++) { if (percentDiff(E [i * NI + j], E_GPU [i * NI + j]) > ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } void CPU__mm2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j, k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C [i * NJ + j] = 0.0; for (k = 0; k < NK; ++k) { C [i * NJ + j] += A [i * NK + k] * B [k * NJ + j]; } } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { E [i * NL + j] = 0.0; for (k = 0; k < NJ; ++k) { E [i * NL + j] += C [i * NJ + k] * D [k * NL + j]; } } } } __global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C); __global__ void __generated_kernel_region_1(DATA_TYPE * D,DATA_TYPE * E,DATA_TYPE * C); void GPU__mm2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation B\n"); acc_present_or_create((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation C\n"); acc_present_or_create((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin B\n"); acc_pcopyin((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin C\n"); acc_pcopyin((void*)C,(1048575+0)*sizeof(DATA_TYPE )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_0), dim3((((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)B), (DATA_TYPE *)acc_deviceptr((void*)C)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel 
Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout B\n"); acc_copyout_and_keep((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout C\n"); acc_copyout_and_keep((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation C\n"); acc_present_or_create((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation D\n"); acc_present_or_create((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation E\n"); acc_present_or_create((void*)E,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin C\n"); acc_pcopyin((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin D\n"); acc_pcopyin((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin E\n"); acc_pcopyin((void*)E,(1048575+0)*sizeof(DATA_TYPE )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256);hipLaunchKernelGGL(( __generated_kernel_region_1), dim3((((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1)),dim3(256), 0, 0, (DATA_TYPE *)acc_deviceptr((void*)D), (DATA_TYPE *)acc_deviceptr((void*)E), (DATA_TYPE *)acc_deviceptr((void*)C)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { hipError_t err=hipDeviceSynchronize(); if(err!=hipSuccess){ printf("Kernel Launch Error! 
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout C\n"); acc_copyout_and_keep((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout D\n"); acc_copyout_and_keep((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout E\n"); acc_copyout_and_keep((void*)E,(1048575+0)*sizeof(DATA_TYPE )); } int main(int argc, char** argv) { double t_start, t_end, t_start_GPU, t_end_GPU; DATA_TYPE* C; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* E_GPU; C = (DATA_TYPE*)malloc(NI * NJ * sizeof(DATA_TYPE)); A = (DATA_TYPE*)malloc(NI * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK * NJ * sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NJ * NL * sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI * NL * sizeof(DATA_TYPE)); E_GPU = (DATA_TYPE*)malloc(NI * NL * sizeof(DATA_TYPE)); fprintf(stdout, "<< Linear Algebra: 2 Matrix Multiplications (D=A.B; E=C.D) >>\n"); init_array(A, B, C, D); t_start_GPU = rtclock(); GPU__mm2(A, B, C, D, E_GPU); t_end_GPU = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU); t_start = rtclock(); CPU__mm2(A, B, C, D, E); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(E, E_GPU); free(C); free(A); free(B); free(D); free(E); free(E_GPU); return 0; } __global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NI) { for(j = 0; j < NJ; j++) { C [i * NJ + j] = 0.0; int k; for(k = 0; k < NK; ++k) { C [i * NJ + j] += A [i * NK + k] * B [k * NJ + j]; } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(DATA_TYPE * D,DATA_TYPE * E,DATA_TYPE * C){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NI) { for(j = 0; j < NL; j++) { E [i * NL + j] = 0.0; int k; for(k = 0; k < NJ; ++k) { E [i * NL + j] += C [i * NJ + k] * D [k * NL + j]; } } } } } } //append writeback of scalar variables }
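The machine-generated gridDim expression in both kernel launches above is a disguised ceiling division of NI by the 256-thread block size; each thread then computes one full row i of the output matrix. An equivalent readable form (a sketch, not part of the generated code):

// (((abs((int)((NI))-(0+0)))/(1)))/256 + ((NI % 256) == 0 ? 0 : 1)
// simplifies, for positive NI, to:
const int block = 256;
const int grid  = (NI + block - 1) / block;  // ceil(NI / 256.0)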
19c1f0ca87e8bd012288d123c04f98a908276841.cu
#include <stdlib.h> #include <stdio.h> #include <assert.h> #include <openacc.h> #define IPMACC_MAX1(A) (A) #define IPMACC_MAX2(A,B) (A>B?A:B) #define IPMACC_MAX3(A,B,C) (A>B?(A>C?A:(B>C?B:C)):(B>C?C:B)) #ifdef __cplusplus #include "openacc_container.h" #endif #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include "../../common/polybenchUtilFuncts.h" #define ERROR_THRESHOLD 1.0 #define GPU_DEVICE 1 # define NI 1024 # define NJ 1024 # define NK 1024 # define NL 1024 typedef float DATA_TYPE; void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A [i * NI + j] = ((DATA_TYPE)i * j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B [i * NK + j] = ((DATA_TYPE)i * (j + 1)) / NJ; } } for (i = 0; i < NL; i++) { for (j = 0; j < NJ; j++) { C [i * NL + j] = ((DATA_TYPE)i * (j + 3)) / NL; } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { D [i * NL + j] = ((DATA_TYPE)i * (j + 2)) / NK; } } } void compareResults(DATA_TYPE *E, DATA_TYPE *E_GPU) { int i, j, fail; fail = 0; for (i = 0; i < NL; i++) { for (j = 0; j < NI; j++) { if (percentDiff(E [i * NI + j], E_GPU [i * NI + j]) > ERROR_THRESHOLD) { fail++; } } } printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", ERROR_THRESHOLD, fail); } void CPU__mm2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j, k; for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { C [i * NJ + j] = 0.0; for (k = 0; k < NK; ++k) { C [i * NJ + j] += A [i * NK + k] * B [k * NJ + j]; } } } for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { E [i * NL + j] = 0.0; for (k = 0; k < NJ; ++k) { E [i * NL + j] += C [i * NJ + k] * D [k * NL + j]; } } } } __global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C); __global__ void __generated_kernel_region_1(DATA_TYPE * D,DATA_TYPE * E,DATA_TYPE * C); void GPU__mm2(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E) { int i, j; ipmacc_prompt((char*)"IPMACC: memory allocation A\n"); acc_present_or_create((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation B\n"); acc_present_or_create((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation C\n"); acc_present_or_create((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin A\n"); acc_pcopyin((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin B\n"); acc_pcopyin((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin C\n"); acc_pcopyin((void*)C,(1048575+0)*sizeof(DATA_TYPE )); { /* kernel call statement [0, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 0 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_0<<<(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)A), (DATA_TYPE *)acc_deviceptr((void*)B), (DATA_TYPE *)acc_deviceptr((void*)C)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! 
+ k] * B [k * NJ + j]; } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(DATA_TYPE * D,DATA_TYPE * E,DATA_TYPE * C){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NI) { for(j = 0; j < NL; j++) { E [i * NL + j] = 0.0; int k; for(k = 0; k < NJ; ++k) { E [i * NL + j] += C [i * NJ + k] * D [k * NL + j]; } } } } } } //append writeback of scalar variables }
error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout A\n"); acc_copyout_and_keep((void*)A,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout B\n"); acc_copyout_and_keep((void*)B,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout C\n"); acc_copyout_and_keep((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation C\n"); acc_present_or_create((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation D\n"); acc_present_or_create((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory allocation E\n"); acc_present_or_create((void*)E,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin C\n"); acc_pcopyin((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin D\n"); acc_pcopyin((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyin E\n"); acc_pcopyin((void*)E,(1048575+0)*sizeof(DATA_TYPE )); { /* kernel call statement [1, -1]*/ { if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Launching kernel 1 > gridDim: %d\tblockDim: %d\n",(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256); __generated_kernel_region_1<<<(((abs((int)((NI))-(0+0)))/(1)))/256+(((((abs((int)((NI))-(0+0)))/(1)))%(256))==0?0:1),256>>>( (DATA_TYPE *)acc_deviceptr((void*)D), (DATA_TYPE *)acc_deviceptr((void*)E), (DATA_TYPE *)acc_deviceptr((void*)C)); } /* kernel call statement*/ if (getenv("IPMACC_VERBOSE")) printf("IPMACC: Synchronizing the region with host\n"); { cudaError err=cudaDeviceSynchronize(); if(err!=cudaSuccess){ printf("Kernel Launch Error! error code (%d)\n",err); assert(0&&"Launch Failure!\n");} } } ipmacc_prompt((char*)"IPMACC: memory copyout C\n"); acc_copyout_and_keep((void*)C,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout D\n"); acc_copyout_and_keep((void*)D,(1048575+0)*sizeof(DATA_TYPE )); ipmacc_prompt((char*)"IPMACC: memory copyout E\n"); acc_copyout_and_keep((void*)E,(1048575+0)*sizeof(DATA_TYPE )); } int main(int argc, char** argv) { double t_start, t_end, t_start_GPU, t_end_GPU; DATA_TYPE* C; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* E_GPU; C = (DATA_TYPE*)malloc(NI * NJ * sizeof(DATA_TYPE)); A = (DATA_TYPE*)malloc(NI * NK * sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK * NJ * sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NJ * NL * sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI * NL * sizeof(DATA_TYPE)); E_GPU = (DATA_TYPE*)malloc(NI * NL * sizeof(DATA_TYPE)); fprintf(stdout, "<< Linear Algebra: 2 Matrix Multiplications (D=A.B; E=C.D) >>\n"); init_array(A, B, C, D); t_start_GPU = rtclock(); GPU__mm2(A, B, C, D, E_GPU); t_end_GPU = rtclock(); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end_GPU - t_start_GPU); t_start = rtclock(); CPU__mm2(A, B, C, D, E); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(E, E_GPU); free(C); free(A); free(B); free(D); free(E); free(E_GPU); return 0; } __global__ void __generated_kernel_region_0(DATA_TYPE * A,DATA_TYPE * B,DATA_TYPE * C){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NI) { for(j = 0; j < NJ; j++) { C [i * NJ + j] = 0.0; int k; for(k = 0; k < NK; ++k) { C [i * NJ + j] += A [i * NK 
+ k] * B [k * NJ + j]; } } } } } } //append writeback of scalar variables } __global__ void __generated_kernel_region_1(DATA_TYPE * D,DATA_TYPE * E,DATA_TYPE * C){ int __kernel_getuid_x=threadIdx.x+blockIdx.x*blockDim.x; int __kernel_getuid_y=threadIdx.y+blockIdx.y*blockDim.y; int __kernel_getuid_z=threadIdx.z+blockIdx.z*blockDim.z; int i; int j; { { { i=0+(__kernel_getuid_x); if( i < NI) { for(j = 0; j < NL; j++) { E [i * NL + j] = 0.0; int k; for(k = 0; k < NJ; ++k) { E [i * NL + j] += C [i * NJ + k] * D [k * NL + j]; } } } } } } //append writeback of scalar variables }
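The grid-size expression IPMACC emits above, `(((abs((int)((NI))-(0+0)))/(1)))/256 + (... % 256 == 0 ? 0 : 1)`, is simply a ceiling division of the loop trip count NI by the 256-thread block. A small standalone check (illustrative only, not part of the benchmark) makes the equivalence concrete:

#include <cstdio>
#include <cstdlib>

int main() {
  const int block = 256;
  for (int ni = 1; ni <= 4096; ni += 321) {
    // IPMACC's generated expression for the grid size...
    int generated = (((abs((int)(ni) - (0 + 0))) / (1))) / block
                  + ((((abs((int)(ni) - (0 + 0))) / (1)) % block) == 0 ? 0 : 1);
    // ...is the ordinary round-up division.
    int ceil_div = (ni + block - 1) / block;
    printf("ni=%4d generated=%2d ceil_div=%2d%s\n", ni, generated, ceil_div,
           generated == ceil_div ? "" : "  MISMATCH");
  }
  return 0;
}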
8accf75fef533faa12e08177dfe8ca2d9e2ef3d6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

//============================================================================200
// DEFINE/INCLUDE
//============================================================================200

//======================================================================150
// MAIN FUNCTION HEADER
//======================================================================150

#include "./../lavaMD.h"  // (in the main program folder) needed to recognize input parameters

//======================================================================150
// UTILITIES
//======================================================================150

#include "./../util/timer/timer.h"  // (in library path specified to compiler) needed by timer
#include "cudacommon.h"

//======================================================================150
// KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER
//======================================================================150

#include "./kernel_gpu_cuda_wrapper.h"  // (in the current directory)

//======================================================================150
// KERNEL
//======================================================================150

#include "./kernel_gpu_cuda.cu"  // (in the current directory) GPU kernel; cannot be included via a header file because of complications with passing constant-memory variables

//============================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//============================================================================200

/// GPU wrapper: allocates device buffers, copies the inputs, launches the
/// lavaMD kernel over all boxes, and records transfer/kernel times in resultDB.
void kernel_gpu_cuda_wrapper(par_str par_cpu,
                             dim_str dim_cpu,
                             box_str* box_cpu,
                             FOUR_VECTOR* rv_cpu,
                             fp* qv_cpu,
                             FOUR_VECTOR* fv_cpu,
                             ResultDatabase &resultDB)
{
  float kernelTime = 0.0f;
  float transferTime = 0.0f;
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);
  float elapsedTime;

  //====================================================================150
  // GPU SETUP: absorb initial driver overhead
  //====================================================================150
  hipDeviceSynchronize();

  //====================================================================100
  // VARIABLES
  //====================================================================100
  box_str* d_box_gpu;
  FOUR_VECTOR* d_rv_gpu;
  fp* d_qv_gpu;
  FOUR_VECTOR* d_fv_gpu;
  dim3 threads;
  dim3 blocks;

  //====================================================================100
  // EXECUTION PARAMETERS
  //====================================================================100
  blocks.x = dim_cpu.number_boxes;
  blocks.y = 1;
  threads.x = NUMBER_THREADS;  // define the number of threads in the block
  threads.y = 1;

  //====================================================================150
  // GPU MEMORY (MALLOC)
  //====================================================================150
  // boxes
  CUDA_SAFE_CALL(hipMalloc((void**)&d_box_gpu, dim_cpu.box_mem));
  // rv
  CUDA_SAFE_CALL(hipMalloc((void**)&d_rv_gpu, dim_cpu.space_mem));
  // qv
  CUDA_SAFE_CALL(hipMalloc((void**)&d_qv_gpu, dim_cpu.space_mem2));
  // fv
  CUDA_SAFE_CALL(hipMalloc((void**)&d_fv_gpu, dim_cpu.space_mem));

  //====================================================================150
  // GPU MEMORY COPY (host -> device)
  //====================================================================150
  hipEventRecord(start, 0);
  hipMemcpy(d_box_gpu, box_cpu, dim_cpu.box_mem, hipMemcpyHostToDevice);   // boxes
  hipMemcpy(d_rv_gpu, rv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice);   // rv
  hipMemcpy(d_qv_gpu, qv_cpu, dim_cpu.space_mem2, hipMemcpyHostToDevice);  // qv
  hipMemcpy(d_fv_gpu, fv_cpu, dim_cpu.space_mem, hipMemcpyHostToDevice);   // fv
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&elapsedTime, start, stop);
  transferTime += elapsedTime * 1.e-3;

  //====================================================================150
  // KERNEL: launch over all boxes
  //====================================================================150
  hipEventRecord(start, 0);
  hipLaunchKernelGGL((kernel_gpu_cuda), dim3(blocks), dim3(threads), 0, 0,
                     par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu);
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&elapsedTime, start, stop);
  kernelTime += elapsedTime * 1.e-3;
  CHECK_CUDA_ERROR();
  hipDeviceSynchronize();

  //====================================================================150
  // GPU MEMORY COPY (device -> host)
  //====================================================================150
  hipEventRecord(start, 0);
  hipMemcpy(fv_cpu, d_fv_gpu, dim_cpu.space_mem, hipMemcpyDeviceToHost);
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);
  hipEventElapsedTime(&elapsedTime, start, stop);
  transferTime += elapsedTime * 1.e-3;

  char atts[1024];
  sprintf(atts, "boxes1d:%d", dim_cpu.boxes1d_arg);
  resultDB.AddResult("lavamd_kernel_time", atts, "sec", kernelTime);
  resultDB.AddResult("lavamd_transfer_time", atts, "sec", transferTime);
  resultDB.AddResult("lavamd_parity", atts, "N", transferTime / kernelTime);

  //====================================================================150
  // GPU MEMORY DEALLOCATION
  //====================================================================150
  hipFree(d_rv_gpu);
  hipFree(d_qv_gpu);
  hipFree(d_fv_gpu);
  hipFree(d_box_gpu);
}
8accf75fef533faa12e08177dfe8ca2d9e2ef3d6.cu
//============================================================================200
// DEFINE/INCLUDE
//============================================================================200

//======================================================================150
// MAIN FUNCTION HEADER
//======================================================================150

#include "./../lavaMD.h"  // (in the main program folder) needed to recognize input parameters

//======================================================================150
// UTILITIES
//======================================================================150

#include "./../util/timer/timer.h"  // (in library path specified to compiler) needed by timer
#include "cudacommon.h"

//======================================================================150
// KERNEL_GPU_CUDA_WRAPPER FUNCTION HEADER
//======================================================================150

#include "./kernel_gpu_cuda_wrapper.h"  // (in the current directory)

//======================================================================150
// KERNEL
//======================================================================150

#include "./kernel_gpu_cuda.cu"  // (in the current directory) GPU kernel; cannot be included via a header file because of complications with passing constant-memory variables

//============================================================================200
// KERNEL_GPU_CUDA_WRAPPER FUNCTION
//============================================================================200

/// GPU wrapper: allocates device buffers, copies the inputs, launches the
/// lavaMD kernel over all boxes, and records transfer/kernel times in resultDB.
void kernel_gpu_cuda_wrapper(par_str par_cpu,
                             dim_str dim_cpu,
                             box_str* box_cpu,
                             FOUR_VECTOR* rv_cpu,
                             fp* qv_cpu,
                             FOUR_VECTOR* fv_cpu,
                             ResultDatabase &resultDB)
{
  float kernelTime = 0.0f;
  float transferTime = 0.0f;
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float elapsedTime;

  //====================================================================150
  // GPU SETUP: absorb initial driver overhead
  //====================================================================150
  cudaDeviceSynchronize();

  //====================================================================100
  // VARIABLES
  //====================================================================100
  box_str* d_box_gpu;
  FOUR_VECTOR* d_rv_gpu;
  fp* d_qv_gpu;
  FOUR_VECTOR* d_fv_gpu;
  dim3 threads;
  dim3 blocks;

  //====================================================================100
  // EXECUTION PARAMETERS
  //====================================================================100
  blocks.x = dim_cpu.number_boxes;
  blocks.y = 1;
  threads.x = NUMBER_THREADS;  // define the number of threads in the block
  threads.y = 1;

  //====================================================================150
  // GPU MEMORY (MALLOC)
  //====================================================================150
  // boxes
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_box_gpu, dim_cpu.box_mem));
  // rv
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_rv_gpu, dim_cpu.space_mem));
  // qv
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_qv_gpu, dim_cpu.space_mem2));
  // fv
  CUDA_SAFE_CALL(cudaMalloc((void**)&d_fv_gpu, dim_cpu.space_mem));

  //====================================================================150
  // GPU MEMORY COPY (host -> device)
  //====================================================================150
  cudaEventRecord(start, 0);
  cudaMemcpy(d_box_gpu, box_cpu, dim_cpu.box_mem, cudaMemcpyHostToDevice);   // boxes
  cudaMemcpy(d_rv_gpu, rv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);   // rv
  cudaMemcpy(d_qv_gpu, qv_cpu, dim_cpu.space_mem2, cudaMemcpyHostToDevice);  // qv
  cudaMemcpy(d_fv_gpu, fv_cpu, dim_cpu.space_mem, cudaMemcpyHostToDevice);   // fv
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  transferTime += elapsedTime * 1.e-3;

  //====================================================================150
  // KERNEL: launch over all boxes
  //====================================================================150
  cudaEventRecord(start, 0);
  kernel_gpu_cuda<<<blocks, threads>>>(par_cpu, dim_cpu, d_box_gpu, d_rv_gpu, d_qv_gpu, d_fv_gpu);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  kernelTime += elapsedTime * 1.e-3;
  CHECK_CUDA_ERROR();
  cudaDeviceSynchronize();

  //====================================================================150
  // GPU MEMORY COPY (device -> host)
  //====================================================================150
  cudaEventRecord(start, 0);
  cudaMemcpy(fv_cpu, d_fv_gpu, dim_cpu.space_mem, cudaMemcpyDeviceToHost);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  transferTime += elapsedTime * 1.e-3;

  char atts[1024];
  sprintf(atts, "boxes1d:%d", dim_cpu.boxes1d_arg);
  resultDB.AddResult("lavamd_kernel_time", atts, "sec", kernelTime);
  resultDB.AddResult("lavamd_transfer_time", atts, "sec", transferTime);
  resultDB.AddResult("lavamd_parity", atts, "N", transferTime / kernelTime);

  //====================================================================150
  // GPU MEMORY DEALLOCATION
  //====================================================================150
  cudaFree(d_rv_gpu);
  cudaFree(d_qv_gpu);
  cudaFree(d_fv_gpu);
  cudaFree(d_box_gpu);
}
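cudacommon.h supplies the CUDA_SAFE_CALL and CHECK_CUDA_ERROR used above; without assuming their exact definitions, a wrapper of that kind typically has the following shape. This is a standalone sketch, and the SAFE_CALL name is illustrative, not the macro from cudacommon.h:

#include <cstdio>
#include <cstdlib>

// Typical shape of a CUDA_SAFE_CALL-style wrapper: evaluate the call once,
// report file/line on failure, and abort.
#define SAFE_CALL(call)                                               \
    do {                                                              \
        cudaError_t err = (call);                                     \
        if (err != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

int main() {
    float* d = nullptr;
    SAFE_CALL(cudaMalloc(&d, 1024 * sizeof(float)));
    SAFE_CALL(cudaMemset(d, 0, 1024 * sizeof(float)));
    SAFE_CALL(cudaFree(d));
    return 0;
}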
77137cb68eaaef7938921e2d932d5aba79dc249f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Alex Laubscher
 * Gillespie Algorithm
 * Runs a singular simulation on a GPU
 */
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <time.h>

// NOTE: hiprandGenerateUniform() is a host-side API; calling it from device
// code, and passing a host generator handle into a kernel, is not supported.
// The buffers should be filled on the host before launch; see the sketch
// after the CUDA version of this file.
__global__ void simulation(int count, float *tauURN, float *distURN, hiprandGenerator_t gen)
{
    // Same initialization of variables
    int counter = 0;  // number of reactions simulated
    int death;
    int total;
    double tau;
    double sample;
    int check;

    // Initial population
    int pop = 0;

    // Initializing time
    double time = 0;
    double maxTime = 100000;

    // Birth rate
    int birth = 1000;

    // Start the timer
    // NOTE: in device code clock() returns a per-multiprocessor cycle counter,
    // so dividing by CLOCKS_PER_SEC does not yield seconds.
    clock_t time_elapsed = clock();

    // Body of the gillespie
    while (time < maxTime) {
        // Setting the propensity
        death = pop;

        // Sum over the propensities
        total = birth + death;

        // Check if array is empty
        check = counter % count;
        if (check == 0) {
            // Generate the new arrays
            hiprandGenerateUniform(gen, tauURN, count);
            hiprandGenerateUniform(gen, distURN, count);
        }

        // Gives us the time step
        tau = (1.0 / total) * tauURN[check];

        // Second random choice
        sample = total * distURN[check];

        // Update populations
        if (sample < birth) {
            pop = pop + 1;
        } else {
            pop = pop - 1;
        }

        // Advance the simulation clock by the time step
        time = time + tau;

        // Increment the counter
        counter++;
    }

    // Calculate the time elapsed
    time_elapsed = clock() - time_elapsed;
    double timer = ((double)time_elapsed) / CLOCKS_PER_SEC;

    // Calculate the reactions per sec
    double rate = counter / timer;

    printf("Population: %d\n", pop);
    printf("Counter: %d\n", counter);
    printf("Timer: %f\n", timer);
    printf("Rate: %f\n", rate);
}

int main()
{
    // Initialize streams (only stream1 is used below)
    hipStream_t stream1, stream2, stream3, stream4, stream5;
    hipStreamCreate(&stream1);
    hipStreamCreate(&stream2);
    hipStreamCreate(&stream3);
    hipStreamCreate(&stream4);
    hipStreamCreate(&stream5);

    // Create the generator
    hiprandGenerator_t gen;
    hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_DEFAULT);

    int count = 2500000;
    float *tauURN;
    float *distURN;
    hipMalloc((void **)&tauURN, count * sizeof(float));
    hipMalloc((void **)&distURN, count * sizeof(float));

    // Run a single simulation on the device
    hipLaunchKernelGGL((simulation), dim3(1), dim3(1024), 0, stream1, count, tauURN, distURN, gen);
    hipDeviceSynchronize();  // wait for the kernel (and its printf output) to finish

    // Cleanup; there are no page-locked host allocations to free
    hiprandDestroyGenerator(gen);
    hipFree(tauURN);
    hipFree(distURN);
    return 0;
}
77137cb68eaaef7938921e2d932d5aba79dc249f.cu
/*
 * Alex Laubscher
 * Gillespie Algorithm
 * Runs a singular simulation on a GPU
 */
#include <curand.h>
#include <stdio.h>
#include <time.h>

// NOTE: curandGenerateUniform() is a host-side API; calling it from device
// code, and passing a host generator handle into a kernel, is not supported.
// The buffers should be filled on the host before launch; see the sketch
// after this file.
__global__ void simulation(int count, float *tauURN, float *distURN, curandGenerator_t gen)
{
    // Same initialization of variables
    int counter = 0;  // number of reactions simulated
    int death;
    int total;
    double tau;
    double sample;
    int check;

    // Initial population
    int pop = 0;

    // Initializing time
    double time = 0;
    double maxTime = 100000;

    // Birth rate
    int birth = 1000;

    // Start the timer
    // NOTE: in device code clock() returns a per-multiprocessor cycle counter,
    // so dividing by CLOCKS_PER_SEC does not yield seconds.
    clock_t time_elapsed = clock();

    // Body of the gillespie
    while (time < maxTime) {
        // Setting the propensity
        death = pop;

        // Sum over the propensities
        total = birth + death;

        // Check if array is empty
        check = counter % count;
        if (check == 0) {
            // Generate the new arrays
            curandGenerateUniform(gen, tauURN, count);
            curandGenerateUniform(gen, distURN, count);
        }

        // Gives us the time step
        tau = (1.0 / total) * tauURN[check];

        // Second random choice
        sample = total * distURN[check];

        // Update populations
        if (sample < birth) {
            pop = pop + 1;
        } else {
            pop = pop - 1;
        }

        // Advance the simulation clock by the time step
        time = time + tau;

        // Increment the counter
        counter++;
    }

    // Calculate the time elapsed
    time_elapsed = clock() - time_elapsed;
    double timer = ((double)time_elapsed) / CLOCKS_PER_SEC;

    // Calculate the reactions per sec
    double rate = counter / timer;

    printf("Population: %d\n", pop);
    printf("Counter: %d\n", counter);
    printf("Timer: %f\n", timer);
    printf("Rate: %f\n", rate);
}

int main()
{
    // Initialize streams (only stream1 is used below)
    cudaStream_t stream1, stream2, stream3, stream4, stream5;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    cudaStreamCreate(&stream3);
    cudaStreamCreate(&stream4);
    cudaStreamCreate(&stream5);

    // Create the generator
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);

    int count = 2500000;
    float *tauURN;
    float *distURN;
    cudaMalloc((void **)&tauURN, count * sizeof(float));
    cudaMalloc((void **)&distURN, count * sizeof(float));

    // Run a single simulation on the device
    simulation<<<1, 1024, 0, stream1>>>(count, tauURN, distURN, gen);
    cudaDeviceSynchronize();  // wait for the kernel (and its printf output) to finish

    // Cleanup; there are no page-locked host allocations to free
    curandDestroyGenerator(gen);
    cudaFree(tauURN);
    cudaFree(distURN);
    return 0;
}
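curandGenerateUniform() is a host API, so the generation step inside the kernel above cannot work as written; the uniform buffers have to be filled from the host into device memory before the kernel consumes them. A minimal standalone sketch of that pattern follows; the consume kernel, sizes, and seed are illustrative, not part of the original program:

#include <curand.h>
#include <cstdio>

__global__ void consume(const float* urn, int n, float* out) {
    // Toy consumer: each thread reads one pre-generated uniform number.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = urn[i] * 2.0f;
}

int main() {
    const int n = 1 << 20;
    float *d_urn, *d_out;
    cudaMalloc(&d_urn, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);

    // Fill the device buffer from the host side, then launch the consumer.
    curandGenerateUniform(gen, d_urn, n);
    consume<<<(n + 255) / 256, 256>>>(d_urn, n, d_out);
    cudaDeviceSynchronize();

    curandDestroyGenerator(gen);
    cudaFree(d_urn);
    cudaFree(d_out);
    return 0;
}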
487181dc75c5bb5f3a8afed6874d61537c625c76.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMasked.cu"
#else

THC_API void
THCTensor_(maskedFill)(THCState* state,
                       THCTensor *tensor, THCudaByteTensor *mask, real value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
  THArgCheck(THCTensor_(nElement)(state, tensor) ==
             THCudaByteTensor_nElement(state, mask), 2, "sizes do not match");

  if (!THC_pointwiseApply2<real, uint8_t>(
          state, tensor, mask, TensorMaskedFillOp<real, unsigned char>(value))) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(hipGetLastError());
}

THC_API void
THCTensor_(maskedFillByte)(THCState* state,
                           THCTensor *tensor, THByteTensor *mask, real value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedFill)(state, tensor, maskCuda, value);
  THCudaByteTensor_free(state, maskCuda);
}

THC_API void
THCTensor_(maskedCopy)(THCState* state,
                       THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
  ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
  ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
  ptrdiff_t srcSize = THCTensor_(nElement)(state, src);

  // `mask` and `tensor` must have the same number of elements
  THArgCheck(maskSize == tensorSize, 2,
             "mask and tensor must have the same number of elements");

  // Determine our output size
  ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);

  // The number of `1` elements present in the mask must be <= the
  // number of elements available in `src`
  if (totalElements > srcSize) {
    THArgCheck(false, 2, "source nElements must be == mask `1` elements");
  }

  // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
  // iterator prefix sums? Convert `mask` to the same datatype as what
  // we're accumulating the prefix sum in (int64_t) to get around it
  THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
  THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
  THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
  THCudaLongTensor_copyCudaByte(state, maskLong, mask);

  // Use a prefix sum to determine the output locations of the masked elements
  THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
  THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
  THLongStorage_free(maskSizes);

  THCThrustAllocator thrustAlloc(state);

  thrust::device_ptr<int64_t>
    maskData(THCudaLongTensor_data(state, maskLong));
  thrust::device_ptr<int64_t>
    maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
  thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
    maskData,
    maskData + THCudaLongTensor_nElement(state, maskLong),
    maskPrefixSumData);

  // We are getting elements from `src` based on an offset from
  // `maskPrefixSum`, so that should be made contiguous too
  THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);

  // update `tensor` where `mask` == 1 but pull from `src` at
  // maskPrefixSum
  bool status = THC_pointwiseApply3<real, uint8_t, int64_t>(
    state, tensor, mask, maskPrefixSum,
    TensorMaskedCopyOp<real, unsigned char, int64_t>(
      THCTensor_(data)(state, contigSrc)));

  THCTensor_(free)(state, contigSrc);
  THCudaLongTensor_free(state, maskLong);
  THCudaLongTensor_free(state, maskPrefixSum);

  THArgCheck(status, 2, CUTORCH_DIM_WARNING);
  THCudaCheck(hipGetLastError());
}

THC_API void
THCTensor_(maskedCopyByte)(THCState* state,
                           THCTensor *tensor, THByteTensor *mask, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
  THCudaByteTensor_free(state, maskCuda);
}

THC_API void
THCTensor_(maskedSelect)(THCState* state,
                         THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
  THArgCheck(THCudaByteTensor_nElement(state, mask) ==
             THCTensor_(nElement)(state, src), 2, "sizes do not match");

  // Determine our output size
  ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
  THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);

  THCTensor_(resize1d)(state, tensorContig, totalElements);
  if (tensor != tensorContig) {
    THCTensor_(resize1d)(state, tensor, totalElements);
  }

  // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
  // iterator prefix sums? Convert `mask` to the same datatype as what
  // we're accumulating the prefix sum in (int64_t) to get around it
  THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
  THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
  THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
  THCudaLongTensor_copyCudaByte(state, maskLong, mask);

  // Use a prefix sum to determine the output locations of the masked elements
  THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
  THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
  THLongStorage_free(maskSizes);

  THCThrustAllocator thrustAlloc(state);

  thrust::device_ptr<int64_t>
    maskData(THCudaLongTensor_data(state, maskLong));
  thrust::device_ptr<int64_t>
    maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
  thrust::exclusive_scan(
#if TORCH_HIP_VERSION >= 7000
    thrust::hip::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
    maskData,
    maskData + THCudaLongTensor_nElement(state, maskLong),
    maskPrefixSumData);

  // Then copy over the masked elements at their desired output index
  bool status = THC_pointwiseApply3<uint8_t, int64_t, real>(
    state, mask, maskPrefixSum, src,
    TensorMaskedSelectOp<real, unsigned char, int64_t>(
      THCTensor_(data)(state, tensor)));

  THCudaLongTensor_free(state, maskLong);
  THCudaLongTensor_free(state, maskPrefixSum);

  if (tensor != tensorContig) {
    THCTensor_(freeCopyTo)(state, tensorContig, tensor);
  } else {
    THCTensor_(free)(state, tensorContig);
  }

  THArgCheck(status, 2, CUTORCH_DIM_WARNING);
  THCudaCheck(hipGetLastError());
}

// FIXME: remove now that we have THCudaByteTensor?
THC_API void
THCTensor_(maskedSelectByte)(THCState* state,
                             THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
  THCudaByteTensor_free(state, maskCuda);
}

#endif
487181dc75c5bb5f3a8afed6874d61537c625c76.cu
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/THCTensorMasked.cu"
#else

THC_API void
THCTensor_(maskedFill)(THCState* state,
                       THCTensor *tensor, THCudaByteTensor *mask, real value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, mask));
  THArgCheck(THCTensor_(nElement)(state, tensor) ==
             THCudaByteTensor_nElement(state, mask), 2, "sizes do not match");

  if (!THC_pointwiseApply2<real, uint8_t>(
          state, tensor, mask, TensorMaskedFillOp<real, unsigned char>(value))) {
    THArgCheck(false, 2, CUTORCH_DIM_WARNING);
  }

  THCudaCheck(cudaGetLastError());
}

THC_API void
THCTensor_(maskedFillByte)(THCState* state,
                           THCTensor *tensor, THByteTensor *mask, real value)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, tensor));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedFill)(state, tensor, maskCuda, value);
  THCudaByteTensor_free(state, maskCuda);
}

THC_API void
THCTensor_(maskedCopy)(THCState* state,
                       THCTensor *tensor, THCudaByteTensor *mask, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
  ptrdiff_t maskSize = THCudaByteTensor_nElement(state, mask);
  ptrdiff_t tensorSize = THCTensor_(nElement)(state, tensor);
  ptrdiff_t srcSize = THCTensor_(nElement)(state, src);

  // `mask` and `tensor` must have the same number of elements
  THArgCheck(maskSize == tensorSize, 2,
             "mask and tensor must have the same number of elements");

  // Determine our output size
  ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);

  // The number of `1` elements present in the mask must be <= the
  // number of elements available in `src`
  if (totalElements > srcSize) {
    THArgCheck(false, 2, "source nElements must be == mask `1` elements");
  }

  // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
  // iterator prefix sums? Convert `mask` to the same datatype as what
  // we're accumulating the prefix sum in (int64_t) to get around it
  THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
  THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
  THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
  THCudaLongTensor_copyCudaByte(state, maskLong, mask);

  // Use a prefix sum to determine the output locations of the masked elements
  THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
  THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
  THLongStorage_free(maskSizes);

  THCThrustAllocator thrustAlloc(state);

  thrust::device_ptr<int64_t>
    maskData(THCudaLongTensor_data(state, maskLong));
  thrust::device_ptr<int64_t>
    maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
  thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
    maskData,
    maskData + THCudaLongTensor_nElement(state, maskLong),
    maskPrefixSumData);

  // We are getting elements from `src` based on an offset from
  // `maskPrefixSum`, so that should be made contiguous too
  THCTensor* contigSrc = THCTensor_(newContiguous)(state, src);

  // update `tensor` where `mask` == 1 but pull from `src` at
  // maskPrefixSum
  bool status = THC_pointwiseApply3<real, uint8_t, int64_t>(
    state, tensor, mask, maskPrefixSum,
    TensorMaskedCopyOp<real, unsigned char, int64_t>(
      THCTensor_(data)(state, contigSrc)));

  THCTensor_(free)(state, contigSrc);
  THCudaLongTensor_free(state, maskLong);
  THCudaLongTensor_free(state, maskPrefixSum);

  THArgCheck(status, 2, CUTORCH_DIM_WARNING);
  THCudaCheck(cudaGetLastError());
}

THC_API void
THCTensor_(maskedCopyByte)(THCState* state,
                           THCTensor *tensor, THByteTensor *mask, THCTensor *src)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedCopy)(state, tensor, maskCuda, src);
  THCudaByteTensor_free(state, maskCuda);
}

THC_API void
THCTensor_(maskedSelect)(THCState* state,
                         THCTensor* tensor, THCTensor* src, THCudaByteTensor* mask)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 3, tensor, src, mask));
  THArgCheck(THCudaByteTensor_nElement(state, mask) ==
             THCTensor_(nElement)(state, src), 2, "sizes do not match");

  // Determine our output size
  ptrdiff_t totalElements = THCudaByteTensor_sumall(state, mask);
  THCTensor* tensorContig = THCTensor_(newContiguous)(state, tensor);

  THCTensor_(resize1d)(state, tensorContig, totalElements);
  if (tensor != tensorContig) {
    THCTensor_(resize1d)(state, tensor, totalElements);
  }

  // FIXME: there appears to be a bug in Thrust (CUDA 7.0) for mixed
  // iterator prefix sums? Convert `mask` to the same datatype as what
  // we're accumulating the prefix sum in (int64_t) to get around it
  THCudaLongTensor* maskLong = THCudaLongTensor_new(state);
  THLongStorage* maskSizes = THCudaByteTensor_newSizeOf(state, mask);
  THCudaLongTensor_resize(state, maskLong, maskSizes, NULL);
  THCudaLongTensor_copyCudaByte(state, maskLong, mask);

  // Use a prefix sum to determine the output locations of the masked elements
  THCudaLongTensor* maskPrefixSum = THCudaLongTensor_new(state);
  THCudaLongTensor_resize(state, maskPrefixSum, maskSizes, NULL);
  THLongStorage_free(maskSizes);

  THCThrustAllocator thrustAlloc(state);

  thrust::device_ptr<int64_t>
    maskData(THCudaLongTensor_data(state, maskLong));
  thrust::device_ptr<int64_t>
    maskPrefixSumData(THCudaLongTensor_data(state, maskPrefixSum));
  thrust::exclusive_scan(
#if CUDA_VERSION >= 7000
    thrust::cuda::par(thrustAlloc).on(THCState_getCurrentStream(state)),
#endif
    maskData,
    maskData + THCudaLongTensor_nElement(state, maskLong),
    maskPrefixSumData);

  // Then copy over the masked elements at their desired output index
  bool status = THC_pointwiseApply3<uint8_t, int64_t, real>(
    state, mask, maskPrefixSum, src,
    TensorMaskedSelectOp<real, unsigned char, int64_t>(
      THCTensor_(data)(state, tensor)));

  THCudaLongTensor_free(state, maskLong);
  THCudaLongTensor_free(state, maskPrefixSum);

  if (tensor != tensorContig) {
    THCTensor_(freeCopyTo)(state, tensorContig, tensor);
  } else {
    THCTensor_(free)(state, tensorContig);
  }

  THArgCheck(status, 2, CUTORCH_DIM_WARNING);
  THCudaCheck(cudaGetLastError());
}

// FIXME: remove now that we have THCudaByteTensor?
THC_API void
THCTensor_(maskedSelectByte)(THCState* state,
                             THCTensor *tensor, THCTensor *src, THByteTensor *mask)
{
  THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, tensor, src));
  THLongStorage* maskSizes = THByteTensor_newSizeOf(mask);
  THCudaByteTensor* maskCuda = THCudaByteTensor_newWithSize(state, maskSizes, NULL);
  THLongStorage_free(maskSizes);
  THCudaByteTensor_copyByte(state, maskCuda, mask);
  THCTensor_(maskedSelect)(state, tensor, src, maskCuda);
  THCudaByteTensor_free(state, maskCuda);
}

#endif
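The prefix-sum compaction idiom used by maskedCopy and maskedSelect above can be seen in isolation in the following minimal sketch; it uses plain Thrust with illustrative buffer names, not the TH code path. The explicit long init value mirrors the int64_t accumulation the FIXME comments ask for:

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cstdint>
#include <cstdio>

__global__ void masked_select(const float* src, const uint8_t* mask,
                              const long* prefix, int n, float* out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // prefix[i] counts the selected elements before i, i.e. the output slot.
    if (i < n && mask[i]) out[prefix[i]] = src[i];
}

int main() {
    const int n = 8;
    const float h_src[n]    = {0, 1, 2, 3, 4, 5, 6, 7};
    const uint8_t h_mask[n] = {0, 1, 1, 0, 0, 1, 0, 1};

    thrust::device_vector<float> src(h_src, h_src + n);
    thrust::device_vector<uint8_t> mask(h_mask, h_mask + n);
    thrust::device_vector<long> prefix(n);

    // Exclusive scan over the mask, accumulating in long: prefix[i] = sum(mask[0..i-1]).
    thrust::exclusive_scan(mask.begin(), mask.end(), prefix.begin(), 0L);

    thrust::device_vector<float> out(n);
    masked_select<<<1, 32>>>(thrust::raw_pointer_cast(src.data()),
                             thrust::raw_pointer_cast(mask.data()),
                             thrust::raw_pointer_cast(prefix.data()),
                             n, thrust::raw_pointer_cast(out.data()));
    cudaDeviceSynchronize();

    // Expect: 1 2 5 7
    for (int i = 0; i < 4; i++) printf("%g ", (float)out[i]);
    printf("\n");
    return 0;
}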
3d251cac734484799c7fe224b65a4e0e6ab64b33.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nodes/gabor_kernel.h"

__global__ void GaborWeightsKernel(const int n, const bool scaled, const float phi,
                                   const int window_size, const int num_orientations,
                                   const int num_scales, const float *orientations,
                                   const float *scales, float *w)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < n) {
		int num_filters = num_orientations * num_scales;  // unused
		// Decompose the flat index as (scale, ori, y, x), x fastest-varying.
		int indexTemp = i;
		const int x = indexTemp % window_size;
		indexTemp /= window_size;
		const int y = indexTemp % window_size;
		indexTemp /= window_size;
		const int output_channel = indexTemp;  // combined scale*num_orientations + ori (unused)
		const int ori = indexTemp % num_orientations;
		indexTemp /= num_orientations;
		const int scale = indexTemp;

		float half_window_size = (float)window_size / 2.0f;
		int cx = x - half_window_size;
		int cy = y - half_window_size;

		float theta = orientations[ori];
		float sigma = scales[scale];
		float lambda = sigma / 0.35f;
		float xprime = cx * cos(theta) + cy * sin(theta);
		float yprime = -cx * sin(theta) + cy * cos(theta);
		float xprime2 = xprime * xprime;
		float yprime2 = yprime * yprime;
		float sigma2 = sigma * sigma;
		float alpha = 1;
		if (scaled)
			alpha = 0.15915494309 / sigma2;  // 1 / (2*pi)
		w[i] = alpha * exp(-0.5 * (xprime2 + yprime2) / sigma2)
		     * cos(2 * 3.14159265358979323846f * xprime / lambda + phi);
	}
}

GaborKernel::GaborKernel(deepflow::NodeParam *param) : Node(param) {
	LOG_IF(FATAL, param->has_gabor_kernel_param() == false) << "param.has_gabor_kernel_param() == false";
}

void GaborKernel::init() {
	auto gparam = _param->gabor_kernel_param();
	_apply_scale = gparam.apply_scale();
	_phi = gparam.phi();
	_num_orientations = gparam.orientations_size();
	_num_scales = gparam.scales_size();
	DF_NODE_CUDA_CHECK(hipMalloc(&_d_orientations, _num_orientations * sizeof(float)));
	DF_NODE_CUDA_CHECK(hipMemcpy(_d_orientations, gparam.orientations().data(), _num_orientations * sizeof(float), hipMemcpyHostToDevice));
	DF_NODE_CUDA_CHECK(hipMalloc(&_d_scales, _num_scales * sizeof(float)));
	DF_NODE_CUDA_CHECK(hipMemcpy(_d_scales, gparam.scales().data(), _num_scales * sizeof(float), hipMemcpyHostToDevice));
	float max_scale = -FLT_MAX;
	for (int i = 0; i < _num_scales; ++i) {
		float v = gparam.scales(i);
		if (v > max_scale)
			max_scale = v;
	}
	// Window spans 4x the largest scale, rounded up to an odd size.
	_window_size = ceil(4 * max_scale);
	_window_size = _window_size % 2 == 0 ? _window_size + 1 : _window_size;
	auto num_filters = _num_orientations * _num_scales;
	std::array<int, 4> dims = { num_filters, 1, _window_size, _window_size };
	_outputs[0]->initValue(dims);
	generate();
}

void GaborKernel::forward() {}

void GaborKernel::backward() {}

std::string GaborKernel::to_cpp() const {
	return std::string();
}

void GaborKernel::generate() {
	auto size = _outputs[0]->value()->size();
	GaborWeightsKernel<<<numOfBlocks(size), maxThreadsPerBlock>>>(
	    size, _apply_scale, _phi, _window_size, _num_orientations, _num_scales,
	    _d_orientations, _d_scales, (float*)_outputs[0]->value()->gpu_data());
	DF_KERNEL_CHECK();
}
3d251cac734484799c7fe224b65a4e0e6ab64b33.cu
#include "nodes/gabor_kernel.h" __global__ void GaborWeightsKernel(const int n, const bool scaled, const float phi, const int window_size, const int num_orientations, const int num_scales, const float *orientations, const float *scales, float * w) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < n) { int num_filters = num_orientations * num_scales; int indexTemp = i; const int x = indexTemp % window_size; indexTemp /= window_size; const int y = indexTemp % window_size; indexTemp /= window_size; const int output_channel = indexTemp; const int ori = indexTemp % num_orientations; indexTemp /= num_orientations; const int scale = indexTemp; float half_window_size = (float)window_size / 2.0f; int cx = x - half_window_size; int cy = y - half_window_size; float theta = orientations[ori]; float sigma = scales[scale]; float lambda = sigma / 0.35f; float xprime = cx * cos(theta) + cy * sin(theta); float yprime = -cx * sin(theta) + cy * cos(theta); float xprime2 = xprime * xprime; float yprime2 = yprime * yprime; float sigma2 = sigma * sigma; float alpha = 1; if (scaled) alpha = 0.15915494309 / sigma2; w[i] = alpha * exp(-0.5 * (xprime2 + yprime2) / sigma2) * cos( 2* 3.14159265358979323846f * xprime / lambda + phi); } } GaborKernel::GaborKernel(deepflow::NodeParam * param) : Node(param) { LOG_IF(FATAL, param->has_gabor_kernel_param() == false) << "param.has_gabor_kernel_param() == false"; } void GaborKernel::init() { auto gparam = _param->gabor_kernel_param(); _apply_scale = gparam.apply_scale(); _phi = gparam.phi(); _num_orientations = gparam.orientations_size(); _num_scales = gparam.scales_size(); DF_NODE_CUDA_CHECK(cudaMalloc(&_d_orientations, _num_orientations * sizeof(float))); DF_NODE_CUDA_CHECK(cudaMemcpy(_d_orientations, gparam.orientations().data(), _num_orientations * sizeof(float), cudaMemcpyHostToDevice)); DF_NODE_CUDA_CHECK(cudaMalloc(&_d_scales, _num_scales * sizeof(float))); DF_NODE_CUDA_CHECK(cudaMemcpy(_d_scales, gparam.scales().data(), _num_scales * sizeof(float), cudaMemcpyHostToDevice)); float max_scale = -FLT_MAX; for (int i = 0; i < _num_scales; ++i) { float v = gparam.scales(i); if (v > max_scale) max_scale = v; } _window_size = ceil(4 * max_scale); _window_size = _window_size % 2 == 0 ? _window_size + 1 : _window_size; auto num_filters = _num_orientations * _num_scales; std::array<int, 4> dims = { num_filters, 1, _window_size, _window_size }; _outputs[0]->initValue(dims); generate(); } void GaborKernel::forward() { } void GaborKernel::backward() { } std::string GaborKernel::to_cpp() const { return std::string(); } void GaborKernel::generate() { auto size = _outputs[0]->value()->size(); GaborWeightsKernel << < numOfBlocks(size), maxThreadsPerBlock >> > (size, _apply_scale, _phi, _window_size, _num_orientations, _num_scales, _d_orientations, _d_scales, (float*)_outputs[0]->value()->gpu_data()); DF_KERNEL_CHECK(); }
53c45db47f400e882bc0dcbbf7f603328a1f4a0d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Cuda Vec Add
#include <iostream>
#include <stdio.h>
#include <math.h>

// Each thread produces one element of the output matrix
__global__ void vecAdd_1(double *a, double *b, double *c, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) {
        c[id] = a[id] + b[id];
    }
}

// Each thread produces one row of the output matrix
__global__ void vecAdd_2(double *a, double *b, double *c, int n)
{
    int id = threadIdx.x;
    int id_1;
    for (int i = 0; i < n; i++) {
        id_1 = id * n + i;
        c[id_1] = a[id_1] + b[id_1];
    }
}

// Each thread produces one column of the output matrix
__global__ void vecAdd_3(double *a, double *b, double *c, int n)
{
    int id = threadIdx.x;
    int id_1;
    for (int i = 0; i < n; i++) {
        id_1 = id + i * n;
        c[id_1] = a[id_1] + b[id_1];
    }
}

int main(int argc, char* argv[])
{
    int n, m;
    printf("Enter a value for matrix width :");
    scanf("%d", &n);
    printf("Enter a value for matrix height :");
    scanf("%d", &m);

    if (n != m) {
        printf("Matrices dimensions are not entered correctly !\n");
        exit(0);
    }
    if (n > 1024 || m > 1024) {
        printf("Matrices dimensions are not entered correctly \n");
        exit(0);
    }

    // Host vectors
    double *h_a;
    double *h_b;
    double *h_c;

    // Device vectors
    double *d_a;
    double *d_b;
    double *d_c;

    // No of bytes
    size_t n_bytes = n * m * sizeof(double);

    // Allocating memory to host vectors
    h_a = (double*)malloc(n_bytes);
    h_b = (double*)malloc(n_bytes);
    h_c = (double*)malloc(n_bytes);

    // Allocating memory to device vectors
    hipMalloc(&d_a, n_bytes);
    hipMalloc(&d_b, n_bytes);
    hipMalloc(&d_c, n_bytes);

    // Initializing values of host vectors
    for (int i = 0; i < n * m; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = sin(i) * sin(i);
    }

    // Copying values from host to device
    hipMemcpy(d_a, h_a, n_bytes, hipMemcpyHostToDevice);
    hipMemcpy(d_b, h_b, n_bytes, hipMemcpyHostToDevice);

    int blockSize, gridSize;
    blockSize = n;
    // Round up; the division must be done in floating point (or with the
    // integer round-up idiom), not on the already-truncated integer quotient.
    gridSize = (int)ceil((float)n / blockSize);

    // First kernel: one thread per element
    // vecAdd_1<<<n, blockSize>>>(d_a, d_b, d_c, n);

    // Second kernel: one thread per row
    // vecAdd_2<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);

    // Third kernel: one thread per column
    hipLaunchKernelGGL((vecAdd_3), dim3(gridSize), dim3(blockSize), 0, 0, d_a, d_b, d_c, n);

    // Copying back from Device to Host
    hipMemcpy(h_c, d_c, n_bytes, hipMemcpyDeviceToHost);

    double sum = 0;
    for (int j = 0; j < n; j++) {
        sum += h_c[j];
    }
    printf("Final result is: %f\n", sum / (double)n);

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    free(h_a);
    free(h_b);
    free(h_c);
}
53c45db47f400e882bc0dcbbf7f603328a1f4a0d.cu
// Cuda Vec Add
#include <iostream>
#include <stdio.h>
#include <math.h>

// Each thread produces one element of the output matrix
__global__ void vecAdd_1(double *a, double *b, double *c, int n)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n) {
        c[id] = a[id] + b[id];
    }
}

// Each thread produces one row of the output matrix
__global__ void vecAdd_2(double *a, double *b, double *c, int n)
{
    int id = threadIdx.x;
    int id_1;
    for (int i = 0; i < n; i++) {
        id_1 = id * n + i;
        c[id_1] = a[id_1] + b[id_1];
    }
}

// Each thread produces one column of the output matrix
__global__ void vecAdd_3(double *a, double *b, double *c, int n)
{
    int id = threadIdx.x;
    int id_1;
    for (int i = 0; i < n; i++) {
        id_1 = id + i * n;
        c[id_1] = a[id_1] + b[id_1];
    }
}

int main(int argc, char* argv[])
{
    int n, m;
    printf("Enter a value for matrix width :");
    scanf("%d", &n);
    printf("Enter a value for matrix height :");
    scanf("%d", &m);

    if (n != m) {
        printf("Matrices dimensions are not entered correctly !\n");
        exit(0);
    }
    if (n > 1024 || m > 1024) {
        printf("Matrices dimensions are not entered correctly \n");
        exit(0);
    }

    // Host vectors
    double *h_a;
    double *h_b;
    double *h_c;

    // Device vectors
    double *d_a;
    double *d_b;
    double *d_c;

    // No of bytes
    size_t n_bytes = n * m * sizeof(double);

    // Allocating memory to host vectors
    h_a = (double*)malloc(n_bytes);
    h_b = (double*)malloc(n_bytes);
    h_c = (double*)malloc(n_bytes);

    // Allocating memory to device vectors
    cudaMalloc(&d_a, n_bytes);
    cudaMalloc(&d_b, n_bytes);
    cudaMalloc(&d_c, n_bytes);

    // Initializing values of host vectors
    for (int i = 0; i < n * m; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = sin(i) * sin(i);
    }

    // Copying values from host to device
    cudaMemcpy(d_a, h_a, n_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n_bytes, cudaMemcpyHostToDevice);

    int blockSize, gridSize;
    blockSize = n;
    // Round up; the division must be done in floating point (or with the
    // integer round-up idiom), not on the already-truncated integer quotient.
    gridSize = (int)ceil((float)n / blockSize);

    // First kernel: one thread per element
    // vecAdd_1<<<n, blockSize>>>(d_a, d_b, d_c, n);

    // Second kernel: one thread per row
    // vecAdd_2<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);

    // Third kernel: one thread per column
    vecAdd_3<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);

    // Copying back from Device to Host
    cudaMemcpy(h_c, d_c, n_bytes, cudaMemcpyDeviceToHost);

    double sum = 0;
    for (int j = 0; j < n; j++) {
        sum += h_c[j];
    }
    printf("Final result is: %f\n", sum / (double)n);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    free(h_a);
    free(h_b);
    free(h_c);
}
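The rounded-up grid computation is usually done entirely in integer arithmetic, so no float cast is needed at all; a minimal standalone sketch of the idiom (names illustrative), paired with the same `id < n` guard that vecAdd_1 uses:

#include <cstdio>

__global__ void fill(float* out, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = 1.0f;  // the guard absorbs the rounded-up final block
}

int main() {
    const int n = 1000, block = 256;
    const int grid = (n + block - 1) / block;  // ceil(n / block) without floats
    float* d;
    cudaMalloc(&d, n * sizeof(float));
    fill<<<grid, block>>>(d, n);
    cudaDeviceSynchronize();
    printf("grid=%d block=%d covers n=%d\n", grid, block, n);
    cudaFree(d);
    return 0;
}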
acc0e6c416fa0084ed56c372faebd672c1264ac1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include "basics/tensor.cu"
#include <assert.h>
#include <vector>
#include "basics/session.hpp"
#include "initializers/const_initializer.cu"
#include "layers/fc.cu"
#include "utils/utils.cu"

__global__ void init_bottom(Tensor<float>* bottom) {
  for (int b = 0; b < bottom->GetDims()[0]; b++) {
    for (int i = 0; i < bottom->GetDims()[3]; i++) {
      bottom->at(b, 0, 0, i) = i + b;
    }
  }
}

__global__ void show_top(Tensor<float>* top) {
  for (int b = 0; b < top->GetDims()[0]; b++) {
    for (int i = 0; i < top->GetDims()[3]; i++) {
      printf("%f ", top->at(b, 0, 0, i));
    }
    printf("\n");
  }
}

void test_fc_gpu() {
  printf("Example code for fully connected layer on gpu\n");

  hipError_t cudaStatus = hipSetDevice(0);
  checkCudaErrors(cudaStatus);

  size_t in_channels = 64;
  size_t out_channels = 10;

  Session* session = Session::GetNewSession();
  session->gpu = true;
  session->batch_size = 64;

  ConstInitializer<float> const_init(2.0, 1.0);
  FC<float> fc(in_channels, out_channels, &const_init);

  size_t b_dims[4] = {session->batch_size, 1, 1, in_channels};
  Tensor<float>* bottom = Tensor<float>::CreateTensorGPU(b_dims);
  size_t t_dims[4] = {session->batch_size, 1, 1, out_channels};
  Tensor<float>* top = Tensor<float>::CreateTensorGPU(t_dims);

  cudaStatus = hipGetLastError();
  checkCudaErrors(cudaStatus);

  hipLaunchKernelGGL((init_bottom), dim3(1), dim3(1), 0, 0, bottom);
  cudaStatus = hipGetLastError();
  checkCudaErrors(cudaStatus);

  startTimer();
  fc.Forward({bottom}, {top});
  printf("fc layer forward: %3.4f ms \n", stopTimer());

  cudaStatus = hipGetLastError();
  checkCudaErrors(cudaStatus);

  hipLaunchKernelGGL((show_top), dim3(1), dim3(1), 0, 0, top);
  cudaStatus = hipGetLastError();
  checkCudaErrors(cudaStatus);

  Tensor<float>* bottom_diff = Tensor<float>::CreateTensorGPU(b_dims);
  Tensor<float>* top_diff = Tensor<float>::CreateTensorGPU(t_dims);
  hipLaunchKernelGGL((init_bottom), dim3(1), dim3(1), 0, 0, top_diff);

  startTimer();
  fc.Backward({top}, {top_diff}, {bottom}, {bottom_diff});
  printf("fc layer backward: %3.4f ms \n", stopTimer());

  hipLaunchKernelGGL((show_top), dim3(1), dim3(1), 0, 0, bottom_diff);

  hipFree(top_diff);
  hipFree(bottom_diff);
  hipFree(bottom);
  hipFree(top);
}

void test_fc_cpu() {
  printf("Example code for fully connected layer on cpu\n");

  size_t in_channels = 64;
  size_t out_channels = 10;

  Session* session = Session::GetNewSession();
  session->gpu = false;
  session->batch_size = 64;

  ConstInitializer<float> const_init(2.0, 1.0);
  FC<float> fc(in_channels, out_channels, &const_init);

  size_t b_dims[4] = {session->batch_size, 1, 1, in_channels};
  Tensor<float>* bottom = Tensor<float>::CreateTensorCPU(b_dims);
  size_t t_dims[4] = {session->batch_size, 1, 1, out_channels};
  Tensor<float>* top = Tensor<float>::CreateTensorCPU(t_dims);

  for (int b = 0; b < session->batch_size; b++) {
    for (int i = 0; i < in_channels; i++) {
      bottom->at(b, 0, 0, i) = b + i;
    }
  }

  startTimer();
  fc.Forward({bottom}, {top});
  printf("fc layer forward: %3.4f ms \n", stopTimer());

  for (int b = 0; b < session->batch_size; b++) {
    for (int i = 0; i < out_channels; i++) {
      printf("%f ", top->at(b, 0, 0, i));
    }
    printf("\n");
  }

  Tensor<float>* bottom_diff = Tensor<float>::CreateTensorCPU(b_dims);
  Tensor<float>* top_diff = Tensor<float>::CreateTensorCPU(t_dims);
  for (int b = 0; b < session->batch_size; b++) {
    for (int o = 0; o < out_channels; o++) {
      top_diff->at(b, 0, 0, o) = (b + o) / 2;  // integer division
    }
  }

  fc.Backward({top}, {top_diff}, {bottom}, {bottom_diff});

  for (int b = 0; b < session->batch_size; b++) {
    for (int i = 0; i < in_channels; i++) {
      printf("%f ", bottom_diff->at(b, 0, 0, i));
    }
    printf("\n");
  }

  delete bottom;
  delete top;
}

int main() {
  // test_fc_cpu();
  test_fc_gpu();
}
acc0e6c416fa0084ed56c372faebd672c1264ac1.cu
#include <stdio.h>
#include "basics/tensor.cu"
#include <assert.h>
#include <vector>
#include "basics/session.hpp"
#include "initializers/const_initializer.cu"
#include "layers/fc.cu"
#include "utils/utils.cu"

__global__ void init_bottom(Tensor<float> * bottom) {
  for(int b = 0; b < bottom->GetDims()[0]; b++) {
    for(int i = 0; i < bottom->GetDims()[3]; i++) {
      bottom->at(b, 0, 0, i) = i+b;
    }
  }
}

__global__ void show_top(Tensor<float>* top) {
  for(int b = 0; b < top->GetDims()[0]; b++) {
    for(int i = 0; i < top->GetDims()[3]; i++) {
      printf("%f ", top->at(b, 0, 0, i));
    }
    printf("\n");
  }
}

void test_fc_gpu() {
  printf("Example code for fully connected layer on gpu\n");
  cudaError_t cudaStatus = cudaSetDevice(0);
  checkCudaErrors(cudaStatus);

  size_t in_channels = 64;
  size_t out_channels = 10;
  Session* session = Session::GetNewSession();
  session->gpu = true;
  session->batch_size = 64;
  ConstInitializer<float> const_init(2.0, 1.0);
  FC<float> fc(in_channels, out_channels, &const_init);

  size_t b_dims[4] = {session->batch_size, 1, 1, in_channels};
  Tensor<float>* bottom = Tensor<float>::CreateTensorGPU(b_dims);
  size_t t_dims[4] = {session->batch_size, 1, 1, out_channels};
  Tensor<float>* top = Tensor<float>::CreateTensorGPU(t_dims);
  cudaStatus = cudaGetLastError();
  checkCudaErrors(cudaStatus);

  init_bottom<<<1, 1>>>(bottom);
  cudaStatus = cudaGetLastError();
  checkCudaErrors(cudaStatus);

  startTimer();
  fc.Forward({bottom}, {top});
  printf("fc layer forward: %3.4f ms \n", stopTimer());
  cudaStatus = cudaGetLastError();
  checkCudaErrors(cudaStatus);

  show_top<<<1, 1>>>(top);
  cudaStatus = cudaGetLastError();
  checkCudaErrors(cudaStatus);

  Tensor<float>* bottom_diff = Tensor<float>::CreateTensorGPU(b_dims);
  Tensor<float>* top_diff = Tensor<float>::CreateTensorGPU(t_dims);
  init_bottom<<<1, 1>>>(top_diff);

  startTimer();
  fc.Backward({top}, {top_diff}, {bottom}, {bottom_diff});
  printf("fc layer backward: %3.4f ms \n", stopTimer());
  show_top<<<1, 1>>>(bottom_diff);

  cudaFree(top_diff);
  cudaFree(bottom_diff);
  cudaFree(bottom);
  cudaFree(top);
}

void test_fc_cpu() {
  printf("Example code for fully connected layer on cpu\n");
  size_t in_channels = 64;
  size_t out_channels = 10;
  Session* session = Session::GetNewSession();
  session->gpu = false;
  session->batch_size = 64;
  ConstInitializer<float> const_init(2.0, 1.0);
  FC<float> fc(in_channels, out_channels, &const_init);

  size_t b_dims[4] = {session->batch_size, 1, 1, in_channels};
  Tensor<float>* bottom = Tensor<float>::CreateTensorCPU(b_dims);
  size_t t_dims[4] = {session->batch_size, 1, 1, out_channels};
  Tensor<float>* top = Tensor<float>::CreateTensorCPU(t_dims);

  for(int b = 0; b < session->batch_size; b++) {
    for(int i = 0; i < in_channels; i++) {
      bottom->at(b, 0, 0, i) = b+i;
    }
  }

  startTimer();
  fc.Forward({bottom}, {top});
  printf("fc layer forward: %3.4f ms \n", stopTimer());

  for(int b = 0; b < session->batch_size; b++) {
    for(int i = 0; i < out_channels; i++) {
      printf("%f ", top->at(b, 0, 0, i));
    }
    printf("\n");
  }

  Tensor<float>* bottom_diff = Tensor<float>::CreateTensorCPU(b_dims);
  Tensor<float>* top_diff = Tensor<float>::CreateTensorCPU(t_dims);
  for(int b = 0; b < session->batch_size; b++) {
    for(int o = 0; o < out_channels; o++) {
      top_diff->at(b, 0, 0, o) = (b+o)/2;  // note: integer division truncates before the float assignment
    }
  }
  fc.Backward({top}, {top_diff}, {bottom}, {bottom_diff});
  for(int b = 0; b < session->batch_size; b++) {
    for(int i = 0; i < in_channels; i++) {
      printf("%f ", bottom_diff->at(b, 0, 0, i));
    }
    printf("\n");
  }

  // note: bottom_diff and top_diff are not freed here
  delete bottom;
  delete top;
}

int main() {
  // test_fc_cpu();
  test_fc_gpu();
}
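Taken together, this pair shows exactly what hipify touches in a simple test driver: the <<<grid, block>>> launches become hipLaunchKernelGGL calls, cuda* runtime entry points become their hip* counterparts, and everything else, including the cudaStatus variable name, passes through unchanged. The test itself can be checked by hand: init_bottom fills row b with the values i+b, so, assuming ConstInitializer<float>(2.0, 1.0) sets every weight to 2.0 and every bias to 1.0 (an assumption about its argument order, not stated in this file), every output of the layer should be 1 + 2 * sum_i(i+b), independent of the output channel. A minimal host-side sketch of that expectation:

#include <cstdio>

// Hypothetical check for the FC test above. Assumes ConstInitializer(2.0, 1.0)
// means weight = 2.0 and bias = 1.0, so out[b][o] = 1 + 2 * sum_i bottom[b][i].
int main() {
  const int batch = 64, in_channels = 64;
  for (int b = 0; b < batch; ++b) {
    double sum = 0;
    for (int i = 0; i < in_channels; ++i) sum += i + b;  // init_bottom pattern
    double expected = 1.0 + 2.0 * sum;                   // same for every output channel
    if (b < 3) std::printf("batch %d: expected %f\n", b, expected);
  }
  return 0;
}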
972367e0eac3b71f1eb5287387d725f358cba7aa.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "poisson2d.hpp"
#include "timer.hpp"
#include <algorithm>
#include <iostream>
#include <stdio.h>
#include <vector>

__global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries)
{
  __shared__ double shared_buffer[256];
  double my_value;

  unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1;
  unsigned int block_start  = work_per_thread * blockDim.x * blockIdx.x;
  unsigned int block_stop   = work_per_thread * blockDim.x * (blockIdx.x + 1);
  unsigned int block_offset = 0;

  // run scan on each section
  for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x)
  {
    // load data:
    my_value = (i < N) ? X[i] : 0;

    // inclusive scan in shared buffer:
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
    {
      __syncthreads();
      shared_buffer[threadIdx.x] = my_value;
      __syncthreads();
      if (threadIdx.x >= stride)
        my_value += shared_buffer[threadIdx.x - stride];
    }
    __syncthreads();
    shared_buffer[threadIdx.x] = my_value;
    __syncthreads();

    // exclusive scan requires us to write a zero value at the beginning of each block
    my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0;

    // write to output array
    if (i < N)
      Y[i] = block_offset + my_value;

    block_offset += shared_buffer[blockDim.x - 1];
  }

  // write carry:
  if (threadIdx.x == 0)
    carries[blockIdx.x] = block_offset;
}

// exclusive-scan of carries
__global__ void scan_kernel_2(double *carries)
{
  __shared__ double shared_buffer[256];

  // load data:
  double my_carry = carries[threadIdx.x];

  // exclusive scan in shared buffer:
  for (unsigned int stride = 1; stride < blockDim.x; stride *= 2)
  {
    __syncthreads();
    shared_buffer[threadIdx.x] = my_carry;
    __syncthreads();
    if (threadIdx.x >= stride)
      my_carry += shared_buffer[threadIdx.x - stride];
  }
  __syncthreads();
  shared_buffer[threadIdx.x] = my_carry;
  __syncthreads();

  // write to output array
  carries[threadIdx.x] = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0;
}

__global__ void scan_kernel_3(double *Y, int N, double const *carries)
{
  unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1;
  unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x;
  unsigned int block_stop  = work_per_thread * blockDim.x * (blockIdx.x + 1);

  __shared__ double shared_offset;
  if (threadIdx.x == 0)
    shared_offset = carries[blockIdx.x];
  __syncthreads();

  // add offset to each element in the block:
  for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x)
    if (i < N)
      Y[i] += shared_offset;
}

__global__ void makeInclusive(double *Y, int N, const double *X)
{
  // note: this in-place left shift races if a neighboring thread writes
  // Y[i+1] before it is read here; a separate output buffer would be the
  // safe formulation.
  for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N-1; i += gridDim.x * blockDim.x) {
    Y[i] = Y[i+1];
  }
  if (blockDim.x * blockIdx.x + threadIdx.x == 0)
    Y[N-1] += X[N-1];
}

void exclusive_scan(double const * input, double * output, int N)
{
  int num_blocks = 256;
  int threads_per_block = 256;

  double *carries;
  hipMalloc(&carries, sizeof(double) * num_blocks);

  // First step: Scan within each thread group and write carries
  hipLaunchKernelGGL((scan_kernel_1), dim3(num_blocks), dim3(threads_per_block), 0, 0, input, output, N, carries);

  // Second step: Compute offset for each thread group (exclusive scan for each thread group)
  hipLaunchKernelGGL((scan_kernel_2), dim3(1), dim3(num_blocks), 0, 0, carries);

  // Third step: Offset each thread group accordingly
  hipLaunchKernelGGL((scan_kernel_3), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, N, carries);

  // Make inclusive
  hipLaunchKernelGGL((makeInclusive), dim3(num_blocks), dim3(threads_per_block), 0, 0, output, N, input);

  hipFree(carries);
}

int main() {
  int N = 200;
  Timer timer;

  //
  // Allocate host arrays for reference
  //
  double *x = (double *)malloc(sizeof(double) * N);
  double *y = (double *)malloc(sizeof(double) * N);
  double *z = (double *)malloc(sizeof(double) * N);
  std::fill(x, x + N, 1);

  // reference calculation (exclusive scan; the GPU result below is
  // additionally made inclusive by makeInclusive):
  y[0] = 0;
  for (std::size_t i=1; i<N; ++i)
    y[i] = y[i-1] + x[i-1];

  //
  // Allocate CUDA-arrays
  //
  double *cuda_x, *cuda_y;
  hipMalloc(&cuda_x, sizeof(double) * N);
  hipMalloc(&cuda_y, sizeof(double) * N);
  hipMemcpy(cuda_x, x, sizeof(double) * N, hipMemcpyHostToDevice);

  // Perform the exclusive scan and obtain results
  std::vector<double> timings;
  for (int reps=0; reps < 10; ++reps) {
    timer.reset();
    exclusive_scan(cuda_x, cuda_y, N);
    timings.push_back(timer.get());
  }
  std::sort(timings.begin(), timings.end());
  double time_elapsed = timings[10/2];

  hipMemcpy(z, cuda_y, sizeof(double) * N, hipMemcpyDeviceToHost);

  //
  // Print first few entries for reference
  //
  std::cout << "CPU y: ";
  for (int i=0; i<10; ++i) std::cout << y[i] << " ";
  std::cout << " ... ";
  for (int i=N-10; i<N; ++i) std::cout << y[i] << " ";
  std::cout << std::endl;

  std::cout << "GPU y: ";
  for (int i=0; i<10; ++i) std::cout << z[i] << " ";
  std::cout << " ... ";
  for (int i=N-10; i<N; ++i) std::cout << z[i] << " ";
  std::cout << std::endl;

  std::cout << "Timing took " << time_elapsed << " seconds" << std::endl;

  //
  // Clean up:
  //
  free(x);
  free(y);
  free(z);
  hipFree(cuda_x);
  hipFree(cuda_y);
  return EXIT_SUCCESS;
}
972367e0eac3b71f1eb5287387d725f358cba7aa.cu
#include "poisson2d.hpp" #include "timer.hpp" #include <algorithm> #include <iostream> #include <stdio.h> __global__ void scan_kernel_1(double const *X, double *Y, int N, double *carries) { __shared__ double shared_buffer[256]; double my_value; unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); unsigned int block_offset = 0; // run scan on each section for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) { // load data: my_value = (i < N) ? X[i] : 0; // inclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); if (threadIdx.x >= stride) my_value += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_value; __syncthreads(); // exclusive scan requires us to write a zero value at the beginning of each block my_value = (threadIdx.x > 0) ? shared_buffer[threadIdx.x - 1] : 0; // write to output array if (i < N) Y[i] = block_offset + my_value; block_offset += shared_buffer[blockDim.x-1]; } // write carry: if (threadIdx.x == 0) carries[blockIdx.x] = block_offset; } // exclusive-scan of carries __global__ void scan_kernel_2(double *carries) { __shared__ double shared_buffer[256]; // load data: double my_carry = carries[threadIdx.x]; // exclusive scan in shared buffer: for(unsigned int stride = 1; stride < blockDim.x; stride *= 2) { __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); if (threadIdx.x >= stride) my_carry += shared_buffer[threadIdx.x - stride]; } __syncthreads(); shared_buffer[threadIdx.x] = my_carry; __syncthreads(); // write to output array carries[threadIdx.x] = (threadIdx.x > 0) ? 
shared_buffer[threadIdx.x - 1] : 0; } __global__ void scan_kernel_3(double *Y, int N, double const *carries) { unsigned int work_per_thread = (N - 1) / (gridDim.x * blockDim.x) + 1; unsigned int block_start = work_per_thread * blockDim.x * blockIdx.x; unsigned int block_stop = work_per_thread * blockDim.x * (blockIdx.x + 1); __shared__ double shared_offset; if (threadIdx.x == 0) shared_offset = carries[blockIdx.x]; __syncthreads(); // add offset to each element in the block: for (unsigned int i = block_start + threadIdx.x; i < block_stop; i += blockDim.x) if (i < N) Y[i] += shared_offset; } __global__ void makeInclusive(double *Y, int N, const double *X) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < N-1; i += gridDim.x * blockDim.x) { Y[i] = Y[i+1]; } if (blockDim.x * blockIdx.x + threadIdx.x == 0) Y[N-1] += X[N-1]; } void exclusive_scan(double const * input, double * output, int N) { int num_blocks = 256; int threads_per_block = 256; double *carries; cudaMalloc(&carries, sizeof(double) * num_blocks); // First step: Scan within each thread group and write carries scan_kernel_1<<<num_blocks, threads_per_block>>>(input, output, N, carries); // Second step: Compute offset for each thread group (exclusive scan for each thread group) scan_kernel_2<<<1, num_blocks>>>(carries); // Third step: Offset each thread group accordingly scan_kernel_3<<<num_blocks, threads_per_block>>>(output, N, carries); // Make inclusive makeInclusive<<<num_blocks, threads_per_block>>>(output, N, input); cudaFree(carries); } int main() { int N = 200; Timer timer; // // Allocate host arrays for reference // double *x = (double *)malloc(sizeof(double) * N); double *y = (double *)malloc(sizeof(double) * N); double *z = (double *)malloc(sizeof(double) * N); std::fill(x, x + N, 1); // reference calculation: y[0] = 0; for (std::size_t i=1; i<N; ++i) y[i] = y[i-1] + x[i-1]; // // Allocate CUDA-arrays // double *cuda_x, *cuda_y; cudaMalloc(&cuda_x, sizeof(double) * N); cudaMalloc(&cuda_y, sizeof(double) * N); cudaMemcpy(cuda_x, x, sizeof(double) * N, cudaMemcpyHostToDevice); // Perform the exclusive scan and obtain results std::vector<double> timings; for(int reps=0; reps < 10; ++reps) { timer.reset(); exclusive_scan(cuda_x, cuda_y, N); timings.push_back(timer.get()); } std::sort(timings.begin(), timings.end()); double time_elapsed = timings[10/2]; cudaMemcpy(z, cuda_y, sizeof(double) * N, cudaMemcpyDeviceToHost); // // Print first few entries for reference // std::cout << "CPU y: "; for (int i=0; i<10; ++i) std::cout << y[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << y[i] << " "; std::cout << std::endl; std::cout << "GPU y: "; for (int i=0; i<10; ++i) std::cout << z[i] << " "; std::cout << " ... "; for (int i=N-10; i<N; ++i) std::cout << z[i] << " "; std::cout << std::endl; std::cout << "Timing took " << time_elapsed << " seconds" << std::endl; // // Clean up: // free(x); free(y); free(z); cudaFree(cuda_x); cudaFree(cuda_y); return EXIT_SUCCESS; }
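The two files above implement the usual three-phase decomposition of a scan over more elements than one block can hold: each block scans its contiguous section and records the section total as a carry, one block exclusive-scans the carries, and a final pass adds each section's carry back. A sequential host model of the same decomposition can serve as a reference when checking the kernels; this is a sketch, not part of the original program:

#include <algorithm>
#include <cstddef>
#include <vector>

// Sequential model of the three-phase exclusive scan used by the kernels above.
std::vector<double> exclusive_scan_model(const std::vector<double>& x, std::size_t num_blocks) {
  const std::size_t n = x.size();
  const std::size_t chunk = (n + num_blocks - 1) / num_blocks;
  std::vector<double> y(n, 0.0), carries(num_blocks, 0.0);
  // Phase 1: exclusive scan within each section, record the section total.
  for (std::size_t b = 0; b < num_blocks; ++b) {
    double acc = 0.0;
    for (std::size_t i = b * chunk; i < std::min(n, (b + 1) * chunk); ++i) {
      y[i] = acc;
      acc += x[i];
    }
    carries[b] = acc;
  }
  // Phase 2: exclusive scan of the carries themselves.
  double acc = 0.0;
  for (std::size_t b = 0; b < num_blocks; ++b) {
    double c = carries[b];
    carries[b] = acc;
    acc += c;
  }
  // Phase 3: add each section's carry offset back.
  for (std::size_t b = 0; b < num_blocks; ++b)
    for (std::size_t i = b * chunk; i < std::min(n, (b + 1) * chunk); ++i)
      y[i] += carries[b];
  return y;
}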
fb5e2ff087d69255a2c0977a1e64fc34d3b42cc9.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include "pathalg.h"

static const int WORK_SIZE = 258;

void Bellmanor::copydata(int s, vector<edge> &edges, int nodenum) {
};
void Bellmanor::dellocate() {
};
void Bellmanor::allocate(int maxn, int maxedge) {
}

void Bellmanor::topsort()
{
  cout << " in top sort " << endl;
  queue<int> zero;
  vector<int> order(nodenum*LY, -1);
  for (int i = 0; i < nodenum*LY; i++)
    zero.push(i);
  int biao = 0;
  while (!zero.empty())
  {
    int node = zero.front();
    zero.pop();
    order[node] = biao++;
    for (int i = 0; i < neibn[node].size(); i++)
    {
      if ((--ancestor[neibn[node][i]]) == 0)
        zero.push(neibn[node][i]);
    }
  }
  vector<pair<int, int>> tmp;
  for (int i = 0; i < order.size(); i++)
    tmp.push_back(make_pair(i, order[i]));
  for (int i = 0; i < order.size(); i++)
    ordernode.push_back(tmp[i].first);
};

void Bellmanor::init(pair<vector<edge>, vector<vector<int>>> ext, vector<pair<int, int>> stpair, vector<vector<int>> &relate, ginfo ginf)
{
  nodenum = ginf.pnodesize;
  edges = ext.first;
  vector<vector<int>> esigns;
  esigns = ext.second;
  stp = stpair;
  mark = new int;
  *mark = 0;
  W = WD+1;
  st = new int[edges.size()*LY];
  te = new int[edges.size()*LY];
  d = new int[nodenum*LY*YE];
  has = new int[nodenum*LY*YE];
  p = new int[nodenum*LY*YE];
  w = new int[edges.size()*LY];
  m = new int;
  esignes = new int[edges.size()*LY];
  vector<vector<int>> nein(nodenum*LY, vector<int>());
  neibn = nein;
  vector<vector<int>> neie(nodenum, vector<int>());
  for (int i = 0; i < edges.size(); i++)
  {
    int s = edges[i].s;
    int t = edges[i].t;
    neibn[s].push_back(t);
    neie[s].push_back(i);
  }
  int count = 0;
  for (int k = 0; k < LY; k++)
    for (int i = 0; i < nodenum; i++)
      for (int j = 0; j < neibn[i].size(); j++)
      {
        st[count] = i;
        if (esigns[k][neie[i][j]] < 0)
          te[count] = i;
        else
          te[count] = neibn[i][j];
        count++;
      }
  for (int i = 0; i < nodenum*LY*YE; i++)
    d[i] = INT_MAX/2, p[i] = -1, has[i] = -1;
  int cc = 0;
  for (int k = 0; k < LY; k++)
    for (int i = 0; i < edges.size(); i++)
      w[cc++] = esigns[k][i];
  cout << cc << " " << edges.size() << endl;
  for (int k = 0; k < LY; k++)
  {
    int boff = k*YE*nodenum;
    for (int i = 0; i < YE; i++)
    {
      int soff = i*nodenum;
      // note: this inner loop re-marks the same source stpair[i] stpair.size()
      // times; stpair[j] may have been intended.
      for (int j = 0; j < stpair.size(); j++)
      {
        d[boff+soff+stpair[i].first] = 0;
        has[boff+soff+stpair[i].first] = 0;
      }
    }
  }
  hipMalloc((void**)&dev_st, LY*edges.size()*sizeof(int));
  hipMalloc((void**)&dev_te, LY*edges.size()*sizeof(int));
  hipMalloc((void**)&dev_d, YE*LY*nodenum*sizeof(int));
  hipMalloc((void**)&dev_p, YE*LY*nodenum*sizeof(int));
  hipMalloc((void**)&dev_has, YE*LY*nodenum*sizeof(int));
  hipMalloc((void**)&dev_w, LY*edges.size()*sizeof(int));
  hipMalloc((void**)&dev_m, sizeof(int));
  if (dev_d == NULL)
  {
    printf("couldn't allocate %d int's.\n", YE*LY*nodenum);
  }
  hipMemcpy(dev_te, te, LY*edges.size()*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_st, st, LY*edges.size()*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_w, w, LY*edges.size()*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_d, d, YE*LY*nodenum*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_has, has, YE*LY*nodenum*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_p, p, YE*LY*nodenum*sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(dev_m, m, sizeof(int), hipMemcpyHostToDevice);
  cout << nodenum << endl;
};

Bellmanor::Bellmanor()
{
};

__global__ void bellmanhigh(int *st, int *te, int *d, int *has, int *w, int E, int N, int size, int *m, int round)
{
  int i = threadIdx.x + blockIdx.x*blockDim.x;
  if (i >= size) return;
  int eid = (i % (E*LY));
  int s = st[eid], t = te[eid], weight = w[eid];
  if (weight < 0) return;
  int off = (i/(E*LY))*N + (eid/E)*N*YE;
  if (has[s+off] < round-1) return;
  if (d[s+off] + weight < d[t+off])
  {
    d[t+off] = weight + d[s+off];
    has[t+off] = round;
    *m = 1;
  }
}

__global__ void color(int *st, int *te, int *d, int *pre, int *has, int *w, int E, int N, int size, int round)
{
  int i = threadIdx.x + blockIdx.x*blockDim.x;
  if (i >= size) return;
  int eid = (i % (E*LY));
  int s = st[eid], t = te[eid], weight = w[eid];
  if (weight < 0) return;
  int off = (i/(E*LY))*N + (eid/E)*N*YE;
  if (has[s+off] < round-1) return;
  if (d[s+off] + weight == d[t+off])
    pre[t+off] = s+off;
}

vector<vector<int>> Bellmanor::routalg(int s, int t, int bw)
{
  int kk = 1;
  time_t start, end;
  start = clock();
  int size = edges.size()*LY*YE;
  cout << "size is: " << size << endl;
  *m = 1;
  int round = 1;
  while (*m == 1)
  {
    *m = 0;
    hipMemcpy(dev_m, m, sizeof(int), hipMemcpyHostToDevice);
    hipLaunchKernelGGL((bellmanhigh), dim3(size/1024+1), dim3(1024), 0, 0, dev_st, dev_te, dev_d, dev_has, dev_w, edges.size(), nodenum, size, dev_m, round);
    hipLaunchKernelGGL((color), dim3(size/1024+1), dim3(1024), 0, 0, dev_st, dev_te, dev_d, dev_p, dev_has, dev_w, edges.size(), nodenum, size, round);
    round++;
    hipMemcpy(m, dev_m, sizeof(int), hipMemcpyDeviceToHost);
  }
  hipMemcpy(d, dev_d, LY*YE*nodenum*sizeof(int), hipMemcpyDeviceToHost);
  hipStreamSynchronize(0);
  end = clock();
  cout << "GPU time is : " << end-start << endl;
  cout << "over!" << endl;
  vector<vector<int>> result(LY, vector<int>());
  // note: dev_p, dev_has and dev_m are not freed here
  hipFree(dev_te);
  hipFree(dev_st);
  hipFree(dev_d);
  hipFree(dev_w);
  cout << "before return" << endl;
  return result;
};
fb5e2ff087d69255a2c0977a1e64fc34d3b42cc9.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include "pathalg.h"

static const int WORK_SIZE = 258;

void Bellmanor::copydata(int s, vector<edge> &edges, int nodenum) {
};
void Bellmanor::dellocate() {
};
void Bellmanor::allocate(int maxn, int maxedge) {
}

void Bellmanor::topsort()
{
  cout << " in top sort " << endl;
  queue<int> zero;
  vector<int> order(nodenum*LY, -1);
  for (int i = 0; i < nodenum*LY; i++)
    zero.push(i);
  int biao = 0;
  while (!zero.empty())
  {
    int node = zero.front();
    zero.pop();
    order[node] = biao++;
    for (int i = 0; i < neibn[node].size(); i++)
    {
      if ((--ancestor[neibn[node][i]]) == 0)
        zero.push(neibn[node][i]);
    }
  }
  vector<pair<int, int>> tmp;
  for (int i = 0; i < order.size(); i++)
    tmp.push_back(make_pair(i, order[i]));
  for (int i = 0; i < order.size(); i++)
    ordernode.push_back(tmp[i].first);
};

void Bellmanor::init(pair<vector<edge>, vector<vector<int>>> ext, vector<pair<int, int>> stpair, vector<vector<int>> &relate, ginfo ginf)
{
  nodenum = ginf.pnodesize;
  edges = ext.first;
  vector<vector<int>> esigns;
  esigns = ext.second;
  stp = stpair;
  mark = new int;
  *mark = 0;
  W = WD+1;
  st = new int[edges.size()*LY];
  te = new int[edges.size()*LY];
  d = new int[nodenum*LY*YE];
  has = new int[nodenum*LY*YE];
  p = new int[nodenum*LY*YE];
  w = new int[edges.size()*LY];
  m = new int;
  esignes = new int[edges.size()*LY];
  vector<vector<int>> nein(nodenum*LY, vector<int>());
  neibn = nein;
  vector<vector<int>> neie(nodenum, vector<int>());
  for (int i = 0; i < edges.size(); i++)
  {
    int s = edges[i].s;
    int t = edges[i].t;
    neibn[s].push_back(t);
    neie[s].push_back(i);
  }
  int count = 0;
  for (int k = 0; k < LY; k++)
    for (int i = 0; i < nodenum; i++)
      for (int j = 0; j < neibn[i].size(); j++)
      {
        st[count] = i;
        if (esigns[k][neie[i][j]] < 0)
          te[count] = i;
        else
          te[count] = neibn[i][j];
        count++;
      }
  for (int i = 0; i < nodenum*LY*YE; i++)
    d[i] = INT_MAX/2, p[i] = -1, has[i] = -1;
  int cc = 0;
  for (int k = 0; k < LY; k++)
    for (int i = 0; i < edges.size(); i++)
      w[cc++] = esigns[k][i];
  cout << cc << " " << edges.size() << endl;
  for (int k = 0; k < LY; k++)
  {
    int boff = k*YE*nodenum;
    for (int i = 0; i < YE; i++)
    {
      int soff = i*nodenum;
      // note: this inner loop re-marks the same source stpair[i] stpair.size()
      // times; stpair[j] may have been intended.
      for (int j = 0; j < stpair.size(); j++)
      {
        d[boff+soff+stpair[i].first] = 0;
        has[boff+soff+stpair[i].first] = 0;
      }
    }
  }
  cudaMalloc((void**)&dev_st, LY*edges.size()*sizeof(int));
  cudaMalloc((void**)&dev_te, LY*edges.size()*sizeof(int));
  cudaMalloc((void**)&dev_d, YE*LY*nodenum*sizeof(int));
  cudaMalloc((void**)&dev_p, YE*LY*nodenum*sizeof(int));
  cudaMalloc((void**)&dev_has, YE*LY*nodenum*sizeof(int));
  cudaMalloc((void**)&dev_w, LY*edges.size()*sizeof(int));
  cudaMalloc((void**)&dev_m, sizeof(int));
  if (dev_d == NULL)
  {
    printf("couldn't allocate %d int's.\n", YE*LY*nodenum);
  }
  cudaMemcpy(dev_te, te, LY*edges.size()*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_st, st, LY*edges.size()*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_w, w, LY*edges.size()*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_d, d, YE*LY*nodenum*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_has, has, YE*LY*nodenum*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_p, p, YE*LY*nodenum*sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(dev_m, m, sizeof(int), cudaMemcpyHostToDevice);
  cout << nodenum << endl;
};

Bellmanor::Bellmanor()
{
};

__global__ void bellmanhigh(int *st, int *te, int *d, int *has, int *w, int E, int N, int size, int *m, int round)
{
  int i = threadIdx.x + blockIdx.x*blockDim.x;
  if (i >= size) return;
  int eid = (i % (E*LY));
  int s = st[eid], t = te[eid], weight = w[eid];
  if (weight < 0) return;
  int off = (i/(E*LY))*N + (eid/E)*N*YE;
  if (has[s+off] < round-1) return;
  if (d[s+off] + weight < d[t+off])
  {
    d[t+off] = weight + d[s+off];
    has[t+off] = round;
    *m = 1;
  }
}

__global__ void color(int *st, int *te, int *d, int *pre, int *has, int *w, int E, int N, int size, int round)
{
  int i = threadIdx.x + blockIdx.x*blockDim.x;
  if (i >= size) return;
  int eid = (i % (E*LY));
  int s = st[eid], t = te[eid], weight = w[eid];
  if (weight < 0) return;
  int off = (i/(E*LY))*N + (eid/E)*N*YE;
  if (has[s+off] < round-1) return;
  if (d[s+off] + weight == d[t+off])
    pre[t+off] = s+off;
}

vector<vector<int>> Bellmanor::routalg(int s, int t, int bw)
{
  int kk = 1;
  time_t start, end;
  start = clock();
  int size = edges.size()*LY*YE;
  cout << "size is: " << size << endl;
  *m = 1;
  int round = 1;
  while (*m == 1)
  {
    *m = 0;
    cudaMemcpy(dev_m, m, sizeof(int), cudaMemcpyHostToDevice);
    bellmanhigh<<<size/1024+1, 1024>>>(dev_st, dev_te, dev_d, dev_has, dev_w, edges.size(), nodenum, size, dev_m, round);
    color<<<size/1024+1, 1024>>>(dev_st, dev_te, dev_d, dev_p, dev_has, dev_w, edges.size(), nodenum, size, round);
    round++;
    cudaMemcpy(m, dev_m, sizeof(int), cudaMemcpyDeviceToHost);
  }
  cudaMemcpy(d, dev_d, LY*YE*nodenum*sizeof(int), cudaMemcpyDeviceToHost);
  cudaStreamSynchronize(0);
  end = clock();
  cout << "GPU time is : " << end-start << endl;
  cout << "over!" << endl;
  vector<vector<int>> result(LY, vector<int>());
  // note: dev_p, dev_has and dev_m are not freed here
  cudaFree(dev_te);
  cudaFree(dev_st);
  cudaFree(dev_d);
  cudaFree(dev_w);
  cout << "before return" << endl;
  return result;
};
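One thread per (edge, layer, source-group) combination drives the relaxation here, so most of the kernel is index arithmetic: eid = i % (E*LY) picks the edge slot in the replicated st/te/w arrays, and off = (i/(E*LY))*N + (eid/E)*N*YE selects the distance slice for that group and layer (matching boff = k*YE*nodenum and soff = i*nodenum in init). The constants LY and YE come from pathalg.h, which is not shown, so the sketch below uses illustrative values and my reading of them as layer count and source-group count:

#include <cstdio>

const int LY = 2, YE = 3;  // illustrative stand-ins for the pathalg.h constants

int main() {
  const int E = 5, N = 7;        // edges, nodes (illustrative)
  const int size = E * LY * YE;  // one logical thread per (edge, layer, group)
  for (int i = 0; i < size; i += E) {
    int eid = i % (E * LY);      // slot in the layer-replicated edge arrays
    int layer = eid / E;
    int group = i / (E * LY);
    int off = group * N + layer * N * YE;  // base of this (group, layer) slice of d[]
    std::printf("i=%3d -> eid=%2d layer=%d group=%d off=%3d\n", i, eid, layer, group, off);
  }
  return 0;
}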
6bd034d5a2455fb320720f273c192bd9afd508e1.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
 *  Copyright (c) 2019 by Contributors
 * \file array/cuda/array_op_impl.cu
 * \brief Array operator GPU implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"

namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {

int FindNumThreads(int dim, int max_nthrs) {
  int ret = max_nthrs;
  while (ret > dim) {
    ret = ret >> 1;
  }
  return ret;
}

///////////////////////////// Range /////////////////////////////

template <typename IdType>
__global__ void _RangeKernel(IdType* out, IdType low, IdType length) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    out[tx] = low + tx;
    tx += stride_x;
  }
}

template <DLDeviceType XPU, typename IdType>
IdArray Range(IdType low, IdType high, DLContext ctx) {
  CHECK(high >= low) << "high must be bigger than low";
  const IdType length = high - low;
  IdArray ret = NewIdArray(length, ctx, sizeof(IdType) * 8);
  if (length == 0)
    return ret;
  IdType* ret_data = static_cast<IdType*>(ret->data);
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  int nt = FindNumThreads(length, 1024);
  int nb = (length + nt - 1) / nt;
  hipLaunchKernelGGL((_RangeKernel<IdType>), dim3(nb), dim3(nt), 0, thr_entry->stream, ret_data, low, length);
  return ret;
}

template IdArray Range<kDLGPU, int32_t>(int32_t, int32_t, DLContext);
template IdArray Range<kDLGPU, int64_t>(int64_t, int64_t, DLContext);

///////////////////////////// AsNumBits /////////////////////////////

template <typename InType, typename OutType>
__global__ void _CastKernel(const InType* in, OutType* out, size_t length) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    out[tx] = in[tx];
    tx += stride_x;
  }
}

template <DLDeviceType XPU, typename IdType>
IdArray AsNumBits(IdArray arr, uint8_t bits) {
  const std::vector<int64_t> shape(arr->shape, arr->shape + arr->ndim);
  IdArray ret = IdArray::Empty(shape, DLDataType{kDLInt, bits, 1}, arr->ctx);
  const int64_t length = ret.NumElements();
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  int nt = FindNumThreads(length, 1024);
  int nb = (length + nt - 1) / nt;
  if (bits == 32) {
    hipLaunchKernelGGL((_CastKernel<IdType, int32_t>), dim3(nb), dim3(nt), 0, thr_entry->stream,
        static_cast<IdType*>(arr->data), static_cast<int32_t*>(ret->data), length);
  } else {
    hipLaunchKernelGGL((_CastKernel<IdType, int64_t>), dim3(nb), dim3(nt), 0, thr_entry->stream,
        static_cast<IdType*>(arr->data), static_cast<int64_t*>(ret->data), length);
  }
  return ret;
}

template IdArray AsNumBits<kDLGPU, int32_t>(IdArray arr, uint8_t bits);
template IdArray AsNumBits<kDLGPU, int64_t>(IdArray arr, uint8_t bits);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
6bd034d5a2455fb320720f273c192bd9afd508e1.cu
/*!
 *  Copyright (c) 2019 by Contributors
 * \file array/cuda/array_op_impl.cu
 * \brief Array operator GPU implementation
 */
#include <dgl/array.h>
#include "../../runtime/cuda/cuda_common.h"

namespace dgl {
using runtime::NDArray;
namespace aten {
namespace impl {

int FindNumThreads(int dim, int max_nthrs) {
  int ret = max_nthrs;
  while (ret > dim) {
    ret = ret >> 1;
  }
  return ret;
}

///////////////////////////// Range /////////////////////////////

template <typename IdType>
__global__ void _RangeKernel(IdType* out, IdType low, IdType length) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    out[tx] = low + tx;
    tx += stride_x;
  }
}

template <DLDeviceType XPU, typename IdType>
IdArray Range(IdType low, IdType high, DLContext ctx) {
  CHECK(high >= low) << "high must be bigger than low";
  const IdType length = high - low;
  IdArray ret = NewIdArray(length, ctx, sizeof(IdType) * 8);
  if (length == 0)
    return ret;
  IdType* ret_data = static_cast<IdType*>(ret->data);
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  int nt = FindNumThreads(length, 1024);
  int nb = (length + nt - 1) / nt;
  _RangeKernel<IdType><<<nb, nt, 0, thr_entry->stream>>>(ret_data, low, length);
  return ret;
}

template IdArray Range<kDLGPU, int32_t>(int32_t, int32_t, DLContext);
template IdArray Range<kDLGPU, int64_t>(int64_t, int64_t, DLContext);

///////////////////////////// AsNumBits /////////////////////////////

template <typename InType, typename OutType>
__global__ void _CastKernel(const InType* in, OutType* out, size_t length) {
  int tx = blockIdx.x * blockDim.x + threadIdx.x;
  int stride_x = gridDim.x * blockDim.x;
  while (tx < length) {
    out[tx] = in[tx];
    tx += stride_x;
  }
}

template <DLDeviceType XPU, typename IdType>
IdArray AsNumBits(IdArray arr, uint8_t bits) {
  const std::vector<int64_t> shape(arr->shape, arr->shape + arr->ndim);
  IdArray ret = IdArray::Empty(shape, DLDataType{kDLInt, bits, 1}, arr->ctx);
  const int64_t length = ret.NumElements();
  auto* thr_entry = runtime::CUDAThreadEntry::ThreadLocal();
  int nt = FindNumThreads(length, 1024);
  int nb = (length + nt - 1) / nt;
  if (bits == 32) {
    _CastKernel<IdType, int32_t><<<nb, nt, 0, thr_entry->stream>>>(
        static_cast<IdType*>(arr->data), static_cast<int32_t*>(ret->data), length);
  } else {
    _CastKernel<IdType, int64_t><<<nb, nt, 0, thr_entry->stream>>>(
        static_cast<IdType*>(arr->data), static_cast<int64_t*>(ret->data), length);
  }
  return ret;
}

template IdArray AsNumBits<kDLGPU, int32_t>(IdArray arr, uint8_t bits);
template IdArray AsNumBits<kDLGPU, int64_t>(IdArray arr, uint8_t bits);

}  // namespace impl
}  // namespace aten
}  // namespace dgl
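Both entry points size their launches with FindNumThreads, which simply halves the thread budget until it no longer exceeds the element count, and then rely on grid-stride loops inside the kernels so any (nb, nt) choice covers the array. The helper is host-compatible and can be exercised directly; note that a zero-length input would drive the result to 0, which the Range path above avoids with its early return on length == 0:

#include <cstdio>

// FindNumThreads as defined in the file above: the largest halving of
// max_nthrs that does not exceed dim.
int FindNumThreads(int dim, int max_nthrs) {
  int ret = max_nthrs;
  while (ret > dim) {
    ret = ret >> 1;
  }
  return ret;
}

int main() {
  const int dims[] = {1, 7, 64, 1000, 5000};
  for (int dim : dims) {
    int nt = FindNumThreads(dim, 1024);
    std::printf("dim=%4d -> nt=%4d, nb=%d\n", dim, nt, (dim + nt - 1) / nt);
  }
  return 0;
}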
92c9d662da2b1303971e7cd9ebb64a564744deda.hip
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/hip/ApplyGridUtils.cuh>
#include <ATen/hip/detail/OffsetCalculator.cuh>
#include <ATen/native/hip/Loops.cuh>

namespace at {
namespace native {
namespace {

void mish_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_cuda",
      [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
          using opmath_t = at::opmath_type<scalar_t>;
          const opmath_t x_acc = static_cast<opmath_t>(x);
          return x_acc *
              c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
        });
      });
}

void mish_backward_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_backward_cuda",
      [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
          using opmath_t = at::opmath_type<scalar_t>;
          const opmath_t dy_acc = static_cast<opmath_t>(dy);
          const opmath_t x_acc = static_cast<opmath_t>(x);
          const opmath_t s_acc =
              opmath_t(1) / (opmath_t(1) + c10::hip::compat::exp(-x_acc));
          const opmath_t t_acc =
              c10::hip::compat::tanh(c10::hip::compat::log1p(c10::hip::compat::exp(x_acc)));
          return dy_acc * (t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));
        });
      });
}

} // namespace

REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);

} // namespace native
} // namespace at
92c9d662da2b1303971e7cd9ebb64a564744deda.cu
#define TORCH_ASSERT_NO_OPERATORS
#define _USE_MATH_DEFINES

#include <ATen/native/Activation.h>

#include <cmath>

#include <thrust/tuple.h>

#include <ATen/AccumulateType.h>
#include <ATen/Dispatch.h>
#include <ATen/core/TensorBase.h>
#include <c10/core/Scalar.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/cuda/ApplyGridUtils.cuh>
#include <ATen/cuda/detail/OffsetCalculator.cuh>
#include <ATen/native/cuda/Loops.cuh>

namespace at {
namespace native {
namespace {

void mish_kernel(TensorIteratorBase& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_cuda",
      [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
          using opmath_t = at::opmath_type<scalar_t>;
          const opmath_t x_acc = static_cast<opmath_t>(x);
          return x_acc *
              c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
        });
      });
}

void mish_backward_kernel(TensorIterator& iter) {
  AT_DISPATCH_FLOATING_TYPES_AND2(
      at::ScalarType::Half,
      at::ScalarType::BFloat16,
      iter.dtype(),
      "mish_backward_cuda",
      [&]() {
        gpu_kernel(iter, [] GPU_LAMBDA(scalar_t dy, scalar_t x) -> scalar_t {
          using opmath_t = at::opmath_type<scalar_t>;
          const opmath_t dy_acc = static_cast<opmath_t>(dy);
          const opmath_t x_acc = static_cast<opmath_t>(x);
          const opmath_t s_acc =
              opmath_t(1) / (opmath_t(1) + c10::cuda::compat::exp(-x_acc));
          const opmath_t t_acc =
              c10::cuda::compat::tanh(c10::cuda::compat::log1p(c10::cuda::compat::exp(x_acc)));
          return dy_acc * (t_acc + x_acc * s_acc * (opmath_t(1) - t_acc * t_acc));
        });
      });
}

} // namespace

REGISTER_DISPATCH(mish_stub, &mish_kernel);
REGISTER_DISPATCH(mish_backward_stub, &mish_backward_kernel);

} // namespace native
} // namespace at
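The lambdas encode mish(x) = x * tanh(softplus(x)) with softplus(x) = log1p(exp(x)), and the backward applies the closed-form derivative mish'(x) = t + x * s * (1 - t^2), where s = sigmoid(x) and t = tanh(softplus(x)). A scalar double-precision reference, useful for sanity-checking the kernels on a few points (a sketch, not part of the original file):

#include <cmath>
#include <cstdio>

// Scalar reference for the mish kernels above.
double mish(double x) { return x * std::tanh(std::log1p(std::exp(x))); }

double mish_grad(double x) {
  const double s = 1.0 / (1.0 + std::exp(-x));          // sigmoid(x)
  const double t = std::tanh(std::log1p(std::exp(x)));  // tanh(softplus(x))
  return t + x * s * (1.0 - t * t);
}

int main() {
  const double xs[] = {-2.0, -0.5, 0.0, 0.5, 2.0};
  for (double x : xs)
    std::printf("x=% .1f  mish=% .6f  mish'=% .6f\n", x, mish(x), mish_grad(x));
  return 0;
}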
ab84fa1d228a25d49d64215ec80e6397c0a9dae9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_MAX_THREADS 1024   // this is safe, in reality 256 is our limit

/*
 * Description:
 *    this function avg-pools an input 3D tensor along dimensions 1 and 2
 *    3D input, 3D output
 */
__global__ void subsample(float *input, float *output,
                          int input_n, int input_h, int input_w,
                          int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  output = output + o*output_w*output_h;
  input = input + i*input_w*input_h;

  // For all output pixels...
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      // Sum over the pooling window (the raw sum is written out; no
      // division by kW*kH happens in this kernel)
      float *ptr_input = input + yy*dH*input_w + xx*dW;
      float *ptr_output = output + yy*output_w + xx;
      float sum = 0;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++)
          sum += ptr_input[kx];
        ptr_input += input_w; // next input line
      }
      // Update output
      *ptr_output = sum;
    }
  }
}

static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");

  float *output_data;
  float *input_data;

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");

  if (input->nDimension == 3) {
    long nInputCols = input->size[2];
    long nInputRows = input->size[1];
    long nOutputCols = (nInputCols - kW) / dW + 1;
    long nOutputRows = (nInputRows - kH) / dH + 1;
    long nInputPlane = input->size[0];

    luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");

    input = THCudaTensor_newContiguous(input);
    input_data = THCudaTensor_data(input);

    THCudaTensor_resize3d(output, nInputPlane, nOutputRows, nOutputCols);
    output_data = THCudaTensor_data(output);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane, yblocks);
    dim3 threads(32, 8);

    // run subsample kernel
    hipLaunchKernelGGL((subsample), dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
                       nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  } else {
    long nInputCols = input->size[3];
    long nInputRows = input->size[2];
    long nbatch = input->size[0];
    long nOutputCols = (nInputCols - kW) / dW + 1;
    long nOutputRows = (nInputRows - kH) / dH + 1;
    long nInputPlane = input->size[1];

    luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");

    input = THCudaTensor_newContiguous(input);
    input_data = THCudaTensor_data(input);

    THCudaTensor_resize4d(output, nbatch, nInputPlane, nOutputRows, nOutputCols);
    output_data = THCudaTensor_data(output);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane*nbatch, yblocks);
    dim3 threads(32, 8);

    // run subsample kernel
    hipLaunchKernelGGL((subsample), dim3(blocks), dim3(threads), 0, 0, input_data, output_data,
                       nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  }

  // clean
  THCudaTensor_free(input);

  // check for errors
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    printf("error in SpatialSubsampling.updateOutput: %s\n", hipGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

/*
 * Description:
 *    this function computes the gradInput from gradOutput
 */
__global__ void subgradinput(float *gradInput, float *gradOutput,
                             int input_n, int input_h, int input_w,
                             int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  gradOutput = gradOutput + o*output_w*output_h;
  gradInput = gradInput + i*input_w*input_h;

  // compute gradInput
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
      float *ptr_gradOutput = gradOutput + yy*output_w + xx;
      float z = *ptr_gradOutput;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++)
          ptr_gradInput[kx] += z;
        ptr_gradInput += input_w;
      }
    }
  }
}

/*
 * Description:
 *    this function computes the gradInput from gradOutput
 *    but with an atomic accumulation. It is needed to be done so
 *    for cases of kH != dH and kW != dW
 */
__global__ void subgradinputAtomic(float *gradInput, float *gradOutput,
                                   int input_n, int input_h, int input_w,
                                   int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  gradOutput = gradOutput + o*output_w*output_h;
  gradInput = gradInput + i*input_w*input_h;

  // compute gradInput
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
      float *ptr_gradOutput = gradOutput + yy*output_w + xx;
      float z = *ptr_gradOutput;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++) {
          atomicAdd(&(ptr_gradInput[kx]), z);
        }
        ptr_gradInput += input_w;
      }
    }
  }
}

static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");

  if (input->nDimension == 3) {
    long nInputCols = input->size[2];
    long nInputRows = input->size[1];
    long nInputPlane = input->size[0];

    float *gradOutput_data = THCudaTensor_data(gradOutput);
    float *gradInput_data;

    THCudaTensor_resizeAs(gradInput, input);
    THCudaTensor_zero(gradInput);
    gradInput_data = THCudaTensor_data(gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane, yblocks);
    dim3 threads(32, 8);

    // run updateGradInput kernel
    hipLaunchKernelGGL((subgradinput), dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
                       nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  } else {
    long nInputCols = input->size[3];
    long nInputRows = input->size[2];
    long nInputPlane = input->size[1];
    long nbatch = input->size[0];

    float *gradOutput_data = THCudaTensor_data(gradOutput);
    float *gradInput_data;

    THCudaTensor_resizeAs(gradInput, input);
    THCudaTensor_zero(gradInput);
    gradInput_data = THCudaTensor_data(gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane*nbatch, yblocks);
    dim3 threads(32, 8);

    // run updateGradInput kernel
    if (kH == dH && kW == dW) {
      hipLaunchKernelGGL((subgradinput), dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
                         nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
    } else {
      hipLaunchKernelGGL((subgradinputAtomic), dim3(blocks), dim3(threads), 0, 0, gradInput_data, gradOutput_data,
                         nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
    }
  }

  // check for errors
  hipError_t err = hipGetLastError();
  if (err != hipSuccess) {
    printf("error in SpatialSubsampling.updateGradInput: %s\n", hipGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
  {"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
  {"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
  {NULL, NULL}
};

static void cunn_SpatialAveragePooling_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");
  luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
  lua_pop(L,1);
}

#undef CUDA_MAX_THREADS
ab84fa1d228a25d49d64215ec80e6397c0a9dae9.cu
#define CUDA_MAX_THREADS 1024   // this is safe, in reality 256 is our limit

/*
 * Description:
 *    this function avg-pools an input 3D tensor along dimensions 1 and 2
 *    3D input, 3D output
 */
__global__ void subsample(float *input, float *output,
                          int input_n, int input_h, int input_w,
                          int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  output = output + o*output_w*output_h;
  input = input + i*input_w*input_h;

  // For all output pixels...
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      // Sum over the pooling window (the raw sum is written out; no
      // division by kW*kH happens in this kernel)
      float *ptr_input = input + yy*dH*input_w + xx*dW;
      float *ptr_output = output + yy*output_w + xx;
      float sum = 0;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++)
          sum += ptr_input[kx];
        ptr_input += input_w; // next input line
      }
      // Update output
      *ptr_output = sum;
    }
  }
}

static int cunn_SpatialAveragePooling_updateOutput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *output = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor");

  float *output_data;
  float *input_data;

  luaL_argcheck(L, input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");

  if (input->nDimension == 3) {
    long nInputCols = input->size[2];
    long nInputRows = input->size[1];
    long nOutputCols = (nInputCols - kW) / dW + 1;
    long nOutputRows = (nInputRows - kH) / dH + 1;
    long nInputPlane = input->size[0];

    luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");

    input = THCudaTensor_newContiguous(input);
    input_data = THCudaTensor_data(input);

    THCudaTensor_resize3d(output, nInputPlane, nOutputRows, nOutputCols);
    output_data = THCudaTensor_data(output);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane, yblocks);
    dim3 threads(32, 8);

    // run subsample kernel
    subsample <<<blocks, threads>>> (input_data, output_data,
                                     nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  } else {
    long nInputCols = input->size[3];
    long nInputRows = input->size[2];
    long nbatch = input->size[0];
    long nOutputCols = (nInputCols - kW) / dW + 1;
    long nOutputRows = (nInputRows - kH) / dH + 1;
    long nInputPlane = input->size[1];

    luaL_argcheck(L, nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");

    input = THCudaTensor_newContiguous(input);
    input_data = THCudaTensor_data(input);

    THCudaTensor_resize4d(output, nbatch, nInputPlane, nOutputRows, nOutputCols);
    output_data = THCudaTensor_data(output);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane*nbatch, yblocks);
    dim3 threads(32, 8);

    // run subsample kernel
    subsample <<<blocks, threads>>> (input_data, output_data,
                                     nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  }

  // clean
  THCudaTensor_free(input);

  // check for errors
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in SpatialSubsampling.updateOutput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

/*
 * Description:
 *    this function computes the gradInput from gradOutput
 */
__global__ void subgradinput(float *gradInput, float *gradOutput,
                             int input_n, int input_h, int input_w,
                             int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  gradOutput = gradOutput + o*output_w*output_h;
  gradInput = gradInput + i*input_w*input_h;

  // compute gradInput
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
      float *ptr_gradOutput = gradOutput + yy*output_w + xx;
      float z = *ptr_gradOutput;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++)
          ptr_gradInput[kx] += z;
        ptr_gradInput += input_w;
      }
    }
  }
}

/*
 * Description:
 *    this function computes the gradInput from gradOutput
 *    but with an atomic accumulation. It is needed to be done so
 *    for cases of kH != dH and kW != dW
 */
__global__ void subgradinputAtomic(float *gradInput, float *gradOutput,
                                   int input_n, int input_h, int input_w,
                                   int kH, int kW, int dH, int dW)
{
  // iterators
  int xx, yy;

  // output size
  int output_w = (input_w - kW) / dW + 1;
  int output_h = (input_h - kH) / dH + 1;

  // compute offsets based on thread/block ID
  int o = blockIdx.x;
  int i = o;
  int xx_start = threadIdx.x;
  int xx_end = output_w;
  int xx_step = blockDim.x;
  int yy_start = blockDim.y*blockIdx.y + threadIdx.y;
  int yy_end = output_h;
  int yy_step = blockDim.y*gridDim.y;

  // select input/output plane
  gradOutput = gradOutput + o*output_w*output_h;
  gradInput = gradInput + i*input_w*input_h;

  // compute gradInput
  for(yy = yy_start; yy < yy_end; yy+=yy_step) {
    for(xx = xx_start; xx < xx_end; xx+=xx_step) {
      float *ptr_gradInput = gradInput + yy*dH*input_w + xx*dW;
      float *ptr_gradOutput = gradOutput + yy*output_w + xx;
      float z = *ptr_gradOutput;
      int kx, ky;
      for(ky = 0; ky < kH; ky++) {
        for(kx = 0; kx < kW; kx++) {
          atomicAdd(&(ptr_gradInput[kx]), z);
        }
        ptr_gradInput += input_w;
      }
    }
  }
}

static int cunn_SpatialAveragePooling_updateGradInput(lua_State *L)
{
  THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor");
  THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor");
  int kW = luaT_getfieldcheckint(L, 1, "kW");
  int kH = luaT_getfieldcheckint(L, 1, "kH");
  int dW = luaT_getfieldcheckint(L, 1, "dW");
  int dH = luaT_getfieldcheckint(L, 1, "dH");
  THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor");

  if (input->nDimension == 3) {
    long nInputCols = input->size[2];
    long nInputRows = input->size[1];
    long nInputPlane = input->size[0];

    float *gradOutput_data = THCudaTensor_data(gradOutput);
    float *gradInput_data;

    THCudaTensor_resizeAs(gradInput, input);
    THCudaTensor_zero(gradInput);
    gradInput_data = THCudaTensor_data(gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane, yblocks);
    dim3 threads(32, 8);

    // run updateGradInput kernel
    subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
                                        nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
  } else {
    long nInputCols = input->size[3];
    long nInputRows = input->size[2];
    long nInputPlane = input->size[1];
    long nbatch = input->size[0];

    float *gradOutput_data = THCudaTensor_data(gradOutput);
    float *gradInput_data;

    THCudaTensor_resizeAs(gradInput, input);
    THCudaTensor_zero(gradInput);
    gradInput_data = THCudaTensor_data(gradInput);

    // cuda blocks & threads:
    int yblocks = (int)(16L / nInputPlane);
    yblocks = yblocks < 1 ? 1 : yblocks;
    dim3 blocks(nInputPlane*nbatch, yblocks);
    dim3 threads(32, 8);

    // run updateGradInput kernel
    if (kH == dH && kW == dW) {
      subgradinput <<<blocks, threads>>> (gradInput_data, gradOutput_data,
                                          nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
    } else {
      subgradinputAtomic <<<blocks, threads>>> (gradInput_data, gradOutput_data,
                                                nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
    }
  }

  // check for errors
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    printf("error in SpatialSubsampling.updateGradInput: %s\n", cudaGetErrorString(err));
    THError("aborting");
  }
  return 1;
}

static const struct luaL_Reg cunn_SpatialAveragePooling__ [] = {
  {"SpatialAveragePooling_updateOutput", cunn_SpatialAveragePooling_updateOutput},
  {"SpatialAveragePooling_updateGradInput", cunn_SpatialAveragePooling_updateGradInput},
  {NULL, NULL}
};

static void cunn_SpatialAveragePooling_init(lua_State *L)
{
  luaT_pushmetatable(L, "torch.CudaTensor");
  luaT_registeratname(L, cunn_SpatialAveragePooling__, "nn");
  lua_pop(L,1);
}

#undef CUDA_MAX_THREADS
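The backward pass explains why two kernels exist: every output cell scatters its gradient z over its kH x kW input window, so when the stride equals the kernel size the windows are disjoint and a plain += per thread is race-free, while overlapping windows (kH != dH or kW != dW) force the atomicAdd variant. A sequential CPU model of the same scatter for one plane, as a reference sketch:

#include <vector>

// CPU model of subgradinput/subgradinputAtomic above for a single plane:
// each output gradient is added to every input cell of its pooling window.
void pool_backward_ref(std::vector<float>& gin, const std::vector<float>& gout,
                       int ih, int iw, int kH, int kW, int dH, int dW) {
  const int ow = (iw - kW) / dW + 1;
  const int oh = (ih - kH) / dH + 1;
  for (int yy = 0; yy < oh; ++yy)
    for (int xx = 0; xx < ow; ++xx) {
      const float z = gout[yy * ow + xx];
      for (int ky = 0; ky < kH; ++ky)
        for (int kx = 0; kx < kW; ++kx)
          gin[(yy * dH + ky) * iw + (xx * dW + kx)] += z;  // safe: sequential
    }
}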
50b204fa88bf22ad3f13070c07c72fa6a5de299c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// To make CUDA Toolkit compatible with GCC 4.7
#undef _GLIBCXX_ATOMIC_BUILTINS
#include <iostream>
#include <fcntl.h>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include <sys/types.h>
#include <math.h>
#include <string.h>
#include <map>
#include <cutil_inline.h>
#include "Itemset.cu"
#include "ListItemset.cu"
#include "HashTree.cu"
#include "pardhp.h"
#include "global_constants.h"

struct check_indexes {
  int idx;
  int lb;
  int ub;
  int m;
  int blk;
} *device_check_index, *check_index;

__device__ int g_barrier;
__device__ int temp_counter = 0;
__device__ int device_hash_pos[8000][10000];
__device__ int trans_bitvec[8000][1001];
__device__ int start[8000][30];
__device__ int enda[8000][30];
//__device__ int *addr_touched[120][2000];
//__device__ int addr_counter[120];
//__device__ int addr_iteration;
__device__ int max_threshold;

__device__ __host__ int choose(int n, int k)
{
  int i;
  int val = 1;
  if (k >= 0 && k <= n){
    for (i=n; i > n-k; i--)
      val *= i;
    for (i=2; i <= k; i++)
      val /= i;
  }
  return val;
}

__device__ __host__ int get_hash_function(int num, int k)
{
  int threshold = 2;
  int hash = (int)ceil(pow((float)num/threshold, (float)1.0/k));
  if (hash < 1)
    hash = 1;
  return hash;
}

__device__ void gpu_interblock_sync(int goalVal)
{
  // note: a busy-wait barrier like this is only safe when every block of the
  // grid is resident on the device at the same time.
  int tid = threadIdx.x;
  if (tid == 0) {
    atomicAdd(&g_barrier, 1);
    //printf("\n%d %d",blockIdx.x,oldVal);
  }
  while (g_barrier != goalVal) {
  }
}

__device__ __host__ void form_hash_indx(int *local_hash_index, int hash_function, int maxitem)
{
  int i, cnt;
  i = 0;
  printf("\n HASH_FUNCTION = %d", hash_function);
  if (hash_function == 1) {
    return;
  }
  while (i < maxitem){
    for (cnt = 0; i < maxitem && cnt < hash_function; i++)
      if (local_hash_index[i] == 0){
        local_hash_index[i] = cnt;
        //printf("\n i: %d, val:%d",i,hash_index[i]);
        cnt++;
      }
    for (cnt = hash_function-1; i < maxitem && cnt >= 0; i++)
      if (local_hash_index[i] == 0){
        local_hash_index[i] = cnt;
        //printf("\n i: %d, val:%d",i,hash_index[i]);
        cnt--;
      }
  }
}

__device__ int init_subsets(int *starts, int *endas, int num_item, int k_item)
{
  int i;
  if (num_item < k_item)
    return 0;
  for (i=0; i < k_item; i++){
    starts[i] = i;
    endas[i] = num_item - k_item + 1 + i;
  }
  return 1;
}

__device__ int get_next_subset(int *starts, int *endas, int k_item)
{
  int i, j;
  for (i=k_item-1; i >= 0; i--){
    starts[i]++;
    if (starts[i] < endas[i]){
      for (j=i+1; j < k_item; j++)
        starts[j] = starts[j-1]+1;
      return 1;
    }
  }
  return 0;
}

__device__ ListElement* find_in_list(Itemset *element, int sz, ListElement *head, int thread_id)
{
  for (; head; head = head->next()){
    Itemset *curr = head->item();
    for (int i=0; i < sz; i++){
      int it = element->item((start[thread_id])[i]);
      if (curr->item(i) < it)
        break;
      else if (curr->item(i) > it)
        return NULL;
      else if (i == sz-1)
        return head;
    }
  }
  return NULL;
}

__device__ int not_prune(ListElement *curr, int k, ListElement *beg, int thread_id)
{
  if (k+1 == 3){
    start[thread_id][0] = 1;
    start[thread_id][1] = 2;
    if ((beg = find_in_list(curr->Item, k, beg, thread_id)) == NULL)
      return 0;
  }
  else{
    int res = init_subsets(start[thread_id], enda[thread_id], curr->Item->numitems(), k);
    start[thread_id][k-2] = curr->Item->numitems()-2;
    start[thread_id][k-1] = curr->Item->numitems()-1;
    while (res){
      if ((beg = find_in_list(curr->Item, k, beg, thread_id)) == NULL)
        return 0;
      res = get_next_subset(start[thread_id], enda[thread_id], k);
    }
  }
  return 1;
}

__device__ int ass_cnt = 0;

__device__ void assign_lsupcnt_offset(HashTree *node, int &val)
{
  if (node->is_leaf()) {
    ListItemset *list = node->list();
    /*printf("\n first: %p next: %p",list->first(),list->First->Next);
    printf("\n last: %p next: %p",list->last(),list->Last->Next);
    printf("\n numitem: %d",list->numitem); */
    if (list && list->first()) {
      ListElement *iter = list->first();
      for (; iter; iter = iter->next()) {
        iter->item()->set_sup(val++);
      }
    }
  }
  else {
    for (int i=0; i < node->hash_function(); i++)
      if (node->hash_table(i)) {
        assign_lsupcnt_offset(node->hash_table(i), val);
      }
  }
}

__device__ inline void make_Itemset(Itemset *it, int *buf, int numitem, int tid)
{
  int j;
  it->set_tid(tid);
  it->set_numitems(numitem);
  for (j=0; j < numitem; j++){
    it->add_item(j, (int) buf[j]);
  }
}

__device__ void apriori_gen(int debug, int total_threads, int k, int thread_id, ListElement *tree_listElement, int *tree_listElement_cntr, ListItemset *Largelist, int *gen_check, HashTree *device_HashTree_FreeList, ListElement *temp_listElement)
{
  //int ss,lb,ub,blk,index,counter=0,counter_2=0;
  int lb, ub, blk;
  blk = ceil((double)Largelist->numitem/(double)total_threads);
  if (blk == 0)
    blk = 1;
  if (blk == 1 && thread_id >= Largelist->numitem)
    lb = -1;
  else {
    lb = thread_id * blk;
  }
  ub = min((thread_id+1)*blk, Largelist->numitem);
  if (lb >= 0 && ub <= Largelist->numitem && lb < Largelist->numitem)
  {
    // if (debug==1)
    //printf("\nApriori_gen thread_id= %d, blk= %d ,lb= %d, ub= %d, device_Largelist->numitems= %d nblocks= %d intraLock =%d", thread_id,blk, lb, ub, Largelist->numitem,total_threads,intraCTALock);
    ListElement *L1iter = Largelist->node(lb);
    for (int i=lb; i < ub && L1iter; i++, L1iter = L1iter->next())
    {
      Itemset *temp = L1iter->item();
      //printf("\n thread_id: %d ,i:%d ,item: %d",thread_id,i,temp->item(0));
      ListElement *L2iter = L1iter->next();
      for (; L2iter; L2iter = L2iter->next())
      {
        if (debug==1) printf("\nbeg of inner loop,thread_id %d", thread_id);
        Itemset *temp2 = L2iter->item();
        if (temp->compare(*temp2, k-2) < 0)
          break;
        else
        {
          if (debug==1) printf("\nelse part ,thread_id %d", thread_id);
          int k1 = temp->item(k-2);
          int k2 = temp2->item(k-2);
          if (k1 < k2)
          {
            // In order to save memory, we will use a temp listElement and if it passes the prune stage
            // we will allocate an actual listelement from the free list
            ListElement *it_temp = &temp_listElement[thread_id];
            it_temp->Item->set_numitems(k);
            for (int l=0; l < temp->numitems(); l++)
              it_temp->Item->add_item(l, temp->item(l));
            it_temp->Item->add_item(k-1, k2);
            ListElement *beg = Largelist->first();
            if (debug==1) printf("\n create item list ,thread_id %d", thread_id);
            if (k==2 || not_prune(it_temp, k-1, beg, thread_id))
            {
              if (debug==1) printf("\n inside prune ,thread_id %d", thread_id);
              int val = atomicAdd(tree_listElement_cntr, 1);
              ListElement *it = &tree_listElement[val];
              it->Item->set_numitems(k);
              it->Item->support = -1;
              for (int l=0; l < temp->numitems(); l++)
                it->Item->add_item(l, temp->item(l));
              it->Item->add_item(k-1, k2);
              it->Item->set_sup(0);
              if (debug==1) printf("\nApriori_Kernel calls add element,thread_id:%d", thread_id);
              debug = 0;
              device_Candidate->add_element(debug, it, thread_id, device_HashTree_FreeList);
              debug = 0;
              if (debug==1) printf("\nApriori_Kernel returns from add element,thread_id:%d", thread_id);
            }
          }
        }
      }
    }
  }
}

__device__ void increment(int debug, Itemset *trans, ListItemset *Clist, int *tbitvec, int *cnt_ary)
{
  if (Clist->first())
  {
    ListElement *head = Clist->first();
    for (; head; head = head->next())
    {
      Itemset *temp = head->item();
      if (temp->subsequence(tbitvec, trans->numitems()))
      {
        //atomicAdd(&cnt_ary[temp->sup()],1);
        atomicAdd(&(temp->support), 1);
      }
    }
  }
}

__device__ void subset(int debug, Itemset *trans, int st, int en, int final, HashTree *node, int k, int level, int thread_id, int *tbitvec, int *vist, int hash_function, int *cnt_ary)
{
  int i;
  (*vist)++;
  int myvist = *vist;
  if (node == device_Candidate && node->is_leaf() && node->list() && node->list()->numitem > 0)
  {
    increment(debug, trans, node->list(), tbitvec, cnt_ary);
  }
  else
  {
    for (i=st; i < en; i++)
    {
      int val = trans->item(i);
      int hashval = hash_index[val];
      if (hashval == -1)
        continue;
      if ((device_hash_pos[thread_id])[level*hash_function+hashval] != myvist)
      {
        (device_hash_pos[thread_id])[level*hash_function+hashval] = myvist;
        if (node->hash_table_exists())
        {
          if (node->hash_table(hashval))
          {
            if (node->hash_table(hashval)->is_leaf() && node->hash_table(hashval)->list())
            {
              increment(debug, trans, node->hash_table(hashval)->list(), tbitvec, cnt_ary);
            }
            else if (en+1 <= final)
            {
              subset(debug, trans, i+1, en+1, final, node->hash_table(hashval), k, level+1, thread_id, tbitvec, vist, hash_function, cnt_ary);
            }
          }
        }
      }
    }
  }
}

__device__ void form_large(int debug, int *device_counter_listElement, ListElement *device_free_listElement, ListItemset *device_Largelist, HashTree *node, int k, int &cnt, int *cntary, int min_sup)
{
  if (node->is_leaf())
  {
    ListItemset *list = node->list();
    if (list->numitem > 0 && list->first())
    {
      ListElement *iter = list->first();
      for (; iter; iter = iter->next())
      {
        int temp_sup = iter->item()->sup();
        //iter->item()->set_sup(cntary[cnt++]);
        if (iter->item()->sup() >= min_sup)
        {
          ListElement *element = &device_free_listElement[*device_counter_listElement];
          (*device_counter_listElement)++;
          element->Next = NULL;
          element->Item->support = -1;
          element->Item->copy(iter->item());
          device_Largelist->sortedInsert(element, element->item());
          /* if(debug==1) {
            printf("\n Itemset:");
            for(int ss=0;ss<k;ss++) {
              printf("%d ",iter->item()->item(ss));
            }
          }*/
          for (int j=0; j < iter->item()->numitems(); j++)
          {
            hash_index[iter->item()->item(j)] = 0;
          }
        }
      }
    }
  }
  else{
    for (int i=0; i < node->hash_function(); i++)
      if (node->hash_table(i))
        form_large(debug, device_counter_listElement, device_free_listElement, device_Largelist, node->hash_table(i), k, cnt, cntary, min_sup);
  }
}

__global__ void Init_Kernel(int hash_function, int maxitem, HashTree *HashTree_FreeList, ListItemset *Largelist, ListElement *Largelist_listElement, int *Largelist_listElement_cntr, int *tree_listElement_cntr, int host_numitem)
{
  int threshold = 2;
  for (int i = 0; i < NUM_HT_FREELIST; i++)
    HashTree_FreeList_cntr[i] = max_HashFunction * i;
  device_Candidate = &HashTree_FreeList[HashTree_FreeList_cntr[0]];
  HashTree_FreeList_cntr[0] += 1;
  device_Candidate->HashTree_Init(0, hash_function, threshold);
  Largelist->First = Largelist_listElement;
  Largelist->Last = Largelist_listElement + host_numitem-1;
  Largelist->numitem = host_numitem;
  *Largelist_listElement_cntr = host_numitem;
  /*int i;
  ListElement *temp = Largelist->First;
  for(i=0;i<host_numitem;i++) {
    printf("%d %d %d \n",temp->Item->theItemset[0],temp->Item->theItemset[1],temp->Item->support);
    temp=temp->Next;
  }
  *Largelist_listElement_cntr=host_numitem;
  printf("\n\nelement cntr: %d\n",*Largelist_listElement_cntr );
  printf("\n\nelement cntr: %d\n",*tree_listElement_cntr );*/
}

__global__ void AprioriGen_Kernel(int debug, int device_k, int total_threads, ListElement *tree_listElement, int *tree_listElement_cntr, ListItemset *Largelist, HashTree *HashTree_FreeList, int *gen_check, ListElement *temp_listElement)
{
  int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
  debug = 0;
  apriori_gen(debug, total_threads, device_k, thread_id, tree_listElement, tree_listElement_cntr, Largelist, gen_check, HashTree_FreeList, temp_listElement);
}

__global__ void Subset_Kernel(int debug, int device_k, int records_per_thread, int num_trans, int maxitem, transaction *data_set, Itemset *temp_Itemset, int *cnt_ary, ListItemset *Largelist)
{
  int blk, lb, ub, ll, i, vist, j, temp, device_more;
  int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
  //if(thread_id==0)
  //printf("\nSubset_Kernel Start");
  blk = records_per_thread;
  lb = thread_id*blk;
  ub = min((thread_id+1)*blk, num_trans);
  vist = 1;
  device_more = (choose(Largelist->numitem, 2) > 0);
  //printf("\n%d lb=%d ub=%d records_per_thread=%d",thread_id,lb,ub,records_per_thread);
  for (i=0; i < maxitem; i++){
    trans_bitvec[thread_id][i] = 0;
  }
  temp = ((device_k)+1)*device_Candidate->Hash_function;
  for (ll=0; ll < temp; ll++)
    device_hash_pos[thread_id][ll] = 0;
  if (device_more)
  {
    for (i=lb; i < ub; i++)
    {
      //printf("\n thread_id=%d lb=%d ub=%d records_per_thread=%d",thread_id,lb,ub,records_per_thread);
      make_Itemset(&temp_Itemset[thread_id], data_set[i].item_list, data_set[i].numitem, data_set[i].tid);
      for (j=0; j < temp_Itemset[thread_id].numitems(); j++)
        trans_bitvec[thread_id][temp_Itemset[thread_id].item(j)] = 1;
      subset(debug, &temp_Itemset[thread_id], 0, temp_Itemset[thread_id].numitems()-(device_k)+1, temp_Itemset[thread_id].numitems(), device_Candidate, device_k, 0, thread_id, trans_bitvec[thread_id], &vist, device_Candidate->hash_function(), cnt_ary);
      for (j=0; j < temp_Itemset[thread_id].numitems(); j++)
        trans_bitvec[thread_id][temp_Itemset[thread_id].item(j)] = 0;
    }
  }
  if (thread_id == 0)
  {
    printf("\nSubset_Kernel END");
  }
}

__device__ void print_large(ListItemset *device_Largelist)
{
  for (int jj=0; jj < device_Largelist->numitems(); jj++)
  {
    ListElement *iter = device_Largelist->node(jj);
    printf("\n Itemset:");
    for (int ss=0; ss < iter->item()->numitems(); ss++)
    {
      printf("%d ", iter->item()->item(ss));
    }
    printf("\n sup:%d", iter->item()->sup());
  }
}

__global__ void FormLargeList(int debug, int *device_more, int device_k, ListElement *Largelist_listElement, int *Largelist_listElement_cntr, ListItemset *Largelist, int maxitem, int min_sup, int *cnt_ary, HashTree *HashTree_FreeList)
{
  int ccnntt = 0, i;
  int NUM_INSERT, hash_function, threshold = 2;
  //printf("\nLargelist_element_counter: %d",*Largelist_listElement_cntr);
  *Largelist_listElement_cntr = 0;
  //temp_counter=0;
  Largelist->numitem = 0;
  Largelist->First = NULL;
  Largelist->Last = NULL;
  for (i=0; i < maxitem; i++)
    hash_index[i] = -1;
  if (device_k == 4)
    debug = 1;
  form_large(debug, Largelist_listElement_cntr, Largelist_listElement, Largelist, device_Candidate, device_k, ccnntt, cnt_ary, min_sup);
  printf("\n\n(%d .ITER)it= %d", device_k, Largelist->numitems());
  *device_more = (Largelist->numitems() > 1);
  if (*device_more)
  {
    print_large(Largelist);
    NUM_INSERT = choose(Largelist->numitems(), 2);
    hash_function = get_hash_function(NUM_INSERT, device_k+1);
    printf("\nHash :%d", hash_function);
    form_hash_indx(hash_index, hash_function, maxitem);
    for (int i = 0; i < NUM_HT_FREELIST; i++)
      HashTree_FreeList_cntr[i] = max_HashFunction * i;
    device_Candidate = &HashTree_FreeList[HashTree_FreeList_cntr[0]];
    HashTree_FreeList_cntr[0] += 1;
    device_Candidate->HashTree_Init(0, hash_function, threshold);
  }
  else
  {
    print_large(Largelist);
  }
}

class stats {
public:
  stats() {
    total = 0;
    for (int i=0; i<120; i++)
      transactions[i] = 0;
  }
  int total;
  int transactions[120];
};

void CUDA_entrypoint(transaction *host_data_set, int *host_cnt_ary, int
records_per_thread,int nBlocks,int nthreads) { int *cnt_ary,*tree_listElement_cntr,*Largelist_listElement_cntr,*offsets; int *gen_check,*device_more,*host_offsets; int device_k,k,more,total_threads,i,j,idx,total_combinations,debug,temp_int,offt=0; //int div_const=1024*1024; transaction *data_set; //size_t total,free=0; Itemset *temp_itemset,*temp_itemset_ptr ; void *temp_ptr,*temp_ptr_2; ListItemset *Largelist,*Host_Largelist,*temp_list; ListElement *tree_listElement,*Largelist_listElement,*temp_listElement;; HashTree *HashTree_FreeList, **temp_HashTree; int host_hash_index[1001]; hipSetDevice(0); cout<<"\nDBASE_NUM_TRANS:"<<DBASE_NUM_TRANS<<" DBASE_MAXITEM:"<<DBASE_MAXITEM<<" DBASE_AVG_TRANS_SZ:"<<DBASE_AVG_TRANS_SZ<<" MIN SUPPORT:"<<MINSUPPORT; cout<<" records_per_thread:"<<records_per_thread<<"\n"; //hipMemGetInfo(&free,&total); //cout<<"\n!!FREE!!:"<<free/(div_const); offt=0; host_offsets = new int [DBASE_MAXITEM]; for (i=DBASE_MAXITEM-1; i >= 0; i--) { host_offsets[DBASE_MAXITEM-i-1] = offt; offt += i; } // offsets - Used to figure out the index of an candidate set for the count array cutilSafeCall(hipMalloc(&offsets,(size_t)DBASE_MAXITEM * sizeof(int))); cutilSafeCall(hipMemcpy(offsets,host_offsets,(size_t)DBASE_MAXITEM * sizeof(int),hipMemcpyHostToDevice)); // data_set Data set used to store items from DB cutilSafeCall(hipMalloc(&data_set,(size_t)DBASE_NUM_TRANS * sizeof(transaction))); for(i=0;i<DBASE_NUM_TRANS;i++) { cutilSafeCall(hipMemcpy(&(data_set[i].tid),&host_data_set[i].tid,sizeof(int),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(&(data_set[i].numitem),&host_data_set[i].numitem,sizeof(int),hipMemcpyHostToDevice)); temp_int=host_data_set[i].numitem; //allocate space for item array and copy the items into it cutilSafeCall(hipMalloc(&temp_ptr,(size_t)temp_int *sizeof(int))); cutilSafeCall(hipMemcpy(&(data_set[i].item_list),&temp_ptr,sizeof(int*),hipMemcpyHostToDevice)); cutilSafeCall(hipMemcpy(temp_ptr,host_data_set[i].item_list,(size_t)temp_int*sizeof(int),hipMemcpyHostToDevice)); } printf("Done 1 "); // cnt_ary Array used to count the support for items in the hash tree total_combinations = DBASE_MAXITEM * (DBASE_MAXITEM -1)/2; // N *N-1/2, given N items it is the max. 
number of combination cutilSafeCall(hipMalloc (&cnt_ary ,(size_t)(total_combinations)*sizeof(int))); cutilSafeCall(hipMemcpy(cnt_ary,host_cnt_ary,(total_combinations)*sizeof(int),hipMemcpyHostToDevice)); // tree_listElement - Allocation mem for listElements used in hash tree cutilSafeCall(hipMalloc(&(tree_listElement),((size_t)sizeof(ListElement)*max_large_list_size))); for(i=0;i<max_large_list_size;i++) { cutilSafeCall(hipMalloc(&(temp_itemset),(size_t)sizeof(Itemset))); cutilSafeCall(hipMemcpy(&(tree_listElement[i].Item),&(temp_itemset),sizeof(Itemset*),hipMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(hipMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); cutilSafeCall(hipMemcpy(temp_itemset,&(temp_ptr),sizeof(int*),hipMemcpyHostToDevice)); } printf("Done 3 "); // tree_listElement_cntr - Counter for ListElements used in hash tree cutilSafeCall(hipMalloc(&tree_listElement_cntr,(size_t)sizeof(int))); cutilSafeCall(hipMemset(tree_listElement_cntr,0,sizeof(int))); // HashTree_FreeList - Allocating memory for the hash tree and the free list counters int * h_HashTree_FreeList_cntr; cutilSafeCall(hipMalloc(&h_HashTree_FreeList_cntr,sizeof(int) * NUM_HT_FREELIST)); cutilSafeCall(hipMemcpyToSymbol("HashTree_FreeList_cntr", &h_HashTree_FreeList_cntr, sizeof(int*), 0, hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc(&HashTree_FreeList,(size_t)(sizeof(HashTree)*max_HashFunction*NUM_HT_FREELIST))); size_t total_HashTable_size = max_HashFunction * max_HashFunction * NUM_HT_FREELIST; HashTree ** all_HashTree = NULL; cutilSafeCall(hipMalloc( &(all_HashTree),(size_t)sizeof(HashTree*)*total_HashTable_size)); cutilSafeCall(hipMemset(all_HashTree,0,sizeof(HashTree*)*total_HashTable_size)); printf("HashTreeFreeList pointer: %p, max_HashFunction = %d\n", HashTree_FreeList, max_HashFunction); for(i=0;i<max_HashFunction*NUM_HT_FREELIST;i++) { temp_HashTree = &(all_HashTree[i * max_HashFunction]); cutilSafeCall(hipMemcpy(&(HashTree_FreeList[i].Hash_table),&(temp_HashTree),sizeof(HashTree_FreeList[i].Hash_table),hipMemcpyHostToDevice)); cutilSafeCall(hipMalloc(&(temp_list),(size_t)sizeof(ListItemset))); cutilSafeCall(hipMemcpy( &(HashTree_FreeList[i].List_of_itemsets),&(temp_list),sizeof(ListItemset*),hipMemcpyHostToDevice)); //Init Values cutilSafeCall(hipMemset(temp_list,0,sizeof(ListElement*))); cutilSafeCall(hipMemset(&(temp_list->Last),0,sizeof(ListElement*))); cutilSafeCall(hipMemset(&(temp_list->numitem),0,sizeof(int))); if (i % 100 == 0) printf("\t progressing %d\n", i); } // set some initial values more=1; k=3; total_threads = nBlocks * nthreads; records_per_thread = ceil((double)DBASE_NUM_TRANS/(double)total_threads); // temp_listElement - allocating listElements which are used in apriori_gen to hold some temp values printf("Allocation: temp_listElement; total_threads = %d\n", total_threads); cutilSafeCall(hipMalloc(&(temp_listElement),(size_t)sizeof(ListElement)*total_threads)); for(i=0;i<total_threads;i++) { cutilSafeCall(hipMalloc(&(temp_itemset),(size_t)sizeof(Itemset))); cutilSafeCall(hipMemcpy(&(temp_listElement[i].Item),&(temp_itemset),sizeof(Itemset*),hipMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(hipMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); 
cutilSafeCall(hipMemcpy(temp_itemset,&(temp_ptr),sizeof(int*),hipMemcpyHostToDevice)); if (i % 100 == 0) printf("\t progressing %d\n", i); } // temp_itemset - allocating itemsets which are used in subset to hold some temp values printf("Allocation: temp_itemset; total_threads = %d\n", total_threads); cutilSafeCall(hipMalloc(&(temp_itemset),(size_t)sizeof(Itemset)*total_threads)); for(i=0;i<total_threads;i++) { cutilSafeCall(hipMalloc(&temp_itemset_ptr,(size_t)sizeof(int)*max_trans_size)); cutilSafeCall(hipMemcpy(&temp_itemset[i],&temp_itemset_ptr,sizeof(temp_itemset_ptr),hipMemcpyHostToDevice)); if (i % 100 == 0) printf("\t progressing %d\n", i); } // device_more - outer loop variable cutilSafeCall(hipMalloc(&device_more,(size_t)sizeof(int))); //device_check_index - used for debugging purposes cutilSafeCall(hipMalloc(&device_check_index , (size_t)(nBlocks* sizeof(check_indexes)))); // gen_check - used for debugging purposes cutilSafeCall(hipMalloc(&gen_check,(size_t)sizeof(int)*10000)); //hipMemGetInfo(&free,&total); //cout<<"\n!!FREE!!:"<<free/(div_const); Host_Largelist = new ListItemset(); for(i=0; i <DBASE_MAXITEM; i++) host_hash_index[i] = -1; for(i=0; i < DBASE_MAXITEM-1; i++){ idx = host_offsets[i]-i-1; for (j=i+1; j < DBASE_MAXITEM; j++) { if (host_cnt_ary[idx+j] >= MINSUPPORT) { host_hash_index[i] = 0; host_hash_index[j] = 0; Itemset *it = new Itemset(2); it->set_numitems(2); it->add_item(0,i); it->add_item(1,j); it->set_sup(host_cnt_ary[idx+j]); ListElement *element = new ListElement(it); Host_Largelist->append(element); } } } ListElement *Host_TempListElement = Host_Largelist->First; int host_numitem = Host_Largelist->numitem; // Largelist_listElemenent - Allocating mem for listElements used in largelist printf("Allocation: Largelist_listelement; max_large_list_size = %d\n", max_large_list_size); cutilSafeCall(hipMalloc(&(Largelist_listElement),((size_t)sizeof(ListElement)*max_large_list_size))); for(i=0;i<max_large_list_size;i++) { cutilSafeCall(hipMalloc(&(temp_itemset_ptr),(size_t)sizeof(Itemset))); cutilSafeCall(hipMemcpy(&(Largelist_listElement[i].Item),&(temp_itemset_ptr),sizeof(Itemset*),hipMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(hipMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); cutilSafeCall(hipMemcpy(temp_itemset_ptr,&(temp_ptr),sizeof(int*),hipMemcpyHostToDevice)); if(i<host_numitem) { cutilSafeCall(hipMemcpy(temp_ptr,Host_TempListElement->Item->theItemset,sizeof(int)*2,hipMemcpyHostToDevice)); temp_ptr_2 = &temp_itemset_ptr->theNumel; cutilSafeCall(hipMemcpy(temp_ptr_2,&Host_TempListElement->Item->theNumel,sizeof(int),hipMemcpyHostToDevice)); temp_ptr_2 = &temp_itemset_ptr->support; cutilSafeCall(hipMemcpy(temp_ptr_2,&Host_TempListElement->Item->support,sizeof(int),hipMemcpyHostToDevice)); Host_TempListElement = Host_TempListElement->Next; } if (i % 100 == 0) printf("\t progressing %d\n", i); } //set the next pointers in the Largelist for(i=0;i<host_numitem;i++) { temp_ptr_2 = &(Largelist_listElement[i+1]); cutilSafeCall(hipMemcpy(&(Largelist_listElement[i].Next),&(temp_ptr_2),sizeof(ListElement*),hipMemcpyHostToDevice)); } cutilSafeCall(hipMemset(&(Largelist_listElement[host_numitem-1].Next),0,sizeof(ListElement*))); // Largelist_listElement_cntr - counter for listElements used in Largelist cutilSafeCall(hipMalloc(&Largelist_listElement_cntr,(size_t)sizeof(int))); 
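// NOTE: memset writes its value into every *byte*, so the hipMemset below does not, in
// general, store the integer host_numitem into the counter; the code still behaves
// correctly because Init_Kernel re-initializes *Largelist_listElement_cntr on the device.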
cutilSafeCall(hipMemset(Largelist_listElement_cntr,host_numitem,sizeof(int))); // Largelist is linked list of candidate sets cutilSafeCall(hipMalloc(&Largelist,(size_t)sizeof(ListItemset))); int NUM_INSERT = choose(host_numitem,2); int hash_function = get_hash_function(NUM_INSERT,2); form_hash_indx(host_hash_index,hash_function,DBASE_MAXITEM); //copy the host_hash_index to hash_index cutilSafeCall(hipMemcpyToSymbol(hash_index,&host_hash_index,sizeof(int)*1001)); printf("\n(2.ITER)it= %d", Host_Largelist->numitems()); printf("\nNUM_INSERT: %d",NUM_INSERT); hipLaunchKernelGGL(( Init_Kernel), dim3(1),dim3(1), 0, 0, hash_function, DBASE_MAXITEM, HashTree_FreeList, Largelist, Largelist_listElement, Largelist_listElement_cntr, tree_listElement_cntr, host_numitem); cout<<"\n Loop Start"; // hipThreadSetLimit(hipLimitStackSize,4096); for (k=3;more;k++) { debug=0; device_k=k; //hipMemset(&g_barrier,0,sizeof(int)); cutilSafeCall(hipMemset(tree_listElement_cntr,0,sizeof(int))); // if(k==3) // hipMemset(&addr_iteration,1,sizeof(int)); // else // hipMemset(&addr_iteration,0,sizeof(int)); hipLaunchKernelGGL(( AprioriGen_Kernel), dim3(nBlocks),dim3(nthreads), 0, 0, debug, device_k, total_threads, tree_listElement, tree_listElement_cntr, Largelist, HashTree_FreeList, gen_check, temp_listElement); cutilSafeCall(hipMemset(cnt_ary,0,(total_combinations)*sizeof(int))); hipLaunchKernelGGL(( Subset_Kernel), dim3(nBlocks),dim3(nthreads), 0, 0, debug, device_k, records_per_thread, DBASE_NUM_TRANS, DBASE_MAXITEM, data_set, temp_itemset, cnt_ary, Largelist); hipDeviceSynchronize(); hipLaunchKernelGGL(( FormLargeList), dim3(1),dim3(1), 0, 0, debug, device_more, device_k, Largelist_listElement, Largelist_listElement_cntr, Largelist, DBASE_MAXITEM, MINSUPPORT, cnt_ary, HashTree_FreeList); hipDeviceSynchronize(); cutilSafeCall(hipMemcpy(&more,device_more,sizeof(int),hipMemcpyDeviceToHost)); printf("Cleanup HashTreeFreeList Hash_table\n"); cutilSafeCall(hipMemset(all_HashTree, 0, sizeof(HashTree*)*total_HashTable_size)); } hipDeviceSynchronize(); /*****************CACHE LINES TOUCHED BY TANSACTIONS **************************************************************/ // int cache_line = 128; // bytes // int *host_addr_touched[120][2000],host_addr_counter[120]; // unsigned long key; // // std::map<unsigned long,stats>cacheline_trans; // // for(i=0;i<120;i++) // { // cutilSafeCall(hipMemcpy(host_addr_touched[i],addr_touched[i],2000*sizeof(int*),hipMemcpyDeviceToHost)); // } // cutilSafeCall(hipMemcpy(host_addr_counter,addr_counter,120*sizeof(int),hipMemcpyDeviceToHost)); // // printf("First memory locations: "); // for(i=0;i<120;i++) // printf(" %p",host_addr_touched[i][0]); // // for(i=0;i<120;i++) // { // for(j=0;j<2000;j++) // { // key=(unsigned long)host_addr_touched[i][j]; // // if( (key%cache_line) == 0 ) // Start of Cache Line // { // // cacheline_trans[key].total++; // cacheline_trans[key].transactions[i]=1; // // } // else // { // key = (key-(key%128));// find the start of the cache line // cacheline_trans[key].total++; // cacheline_trans[key].transactions[i]=1; // } // } // } /*******************************************************************************************************************/ /* FILE *fp = fopen("cuda_elements_gen","w"); host_gen_check =(int *)calloc(10000,sizeof(int)); cutilSafeCall(hipMemcpy(host_gen_check,gen_check,10000*sizeof(int),hipMemcpyDeviceToHost)); fprintf(fp,"\n"); int kk=0; for(i=0;i<10000;i++) { if(host_gen_check[i]>0 && host_gen_check[i]<1000) { fprintf(fp,"%d ",host_gen_check[i]); 
kk+=1; if(kk==3) { fprintf(fp,"\n"); kk=0; } } } */ /* check_index = (check_indexes*)calloc(nBlocks,sizeof(check_indexes)); cutilSafeCall(hipMemcpy(check_index,device_check_index,nBlocks*sizeof(check_indexes),hipMemcpyDeviceToHost)); printf("\nIndexes: "); for(i=0;i<nBlocks;i++) { printf("\nidx: %d",check_index[i].idx); printf("\nlb: %d",check_index[i].lb); printf("\nub: %d",check_index[i].ub); printf("\nblk: %d",check_index[i].blk); printf("\nm: %d",check_index[i].m); printf("\n"); } */ /* host_cnt_ary=(int*)calloc(total_combinations,sizeof(int)); cutilSafeCall(hipMemcpy(host_cnt_ary,cnt_ary,(total_combinations)*sizeof(int),hipMemcpyDeviceToHost)); FILE *output_file_count; output_file_count= fopen("cuda_output_count.txt","w"); for(i=0;i<2867;i++) { fprintf(output_file_count,"\n%d",host_cnt_ary[i]); } fclose(output_file_count);*/ /*FILE *output_file; output_file=fopen("cuda_output_data.txt","w"); for(i=0;i<DBASE_NUM_TRANS;i++) { // fprintf(output_file,"%d %d",host_data_set[i].tid,host_data_set[i].numitem); for(j=0;j<host_data_set[i].numitem;j++) { int tmp_index = host_item_offsets[i]+ j; fprintf(output_file,"%d ",host_item_array[tmp_index]); } fprintf(output_file,"\n"); } fprintf(output_file,"\n"); fclose(output_file);*/ printf("\nDONE\n"); }
50b204fa88bf22ad3f13070c07c72fa6a5de299c.cu
// To make CUDA Toolkit compatible with GCC 4.7 #undef _GLIBCXX_ATOMIC_BUILTINS #include <iostream> #include <fcntl.h> #include <fstream> #include <stdio.h> #include <stdlib.h> #include <stddef.h> #include <stdint.h> #include <sys/time.h> #include <time.h> #include <unistd.h> #include <sys/types.h> #include <math.h> #include <string.h> #include <map> #include <cutil_inline.h> #include "Itemset.cu" #include "ListItemset.cu" #include "HashTree.cu" #include "pardhp.h" #include "global_constants.h" struct check_indexes{ int idx; int lb; int ub; int m; int blk; } *device_check_index,*check_index; __device__ int g_barrier; __device__ int temp_counter=0; __device__ int device_hash_pos[8000][10000]; __device__ int trans_bitvec[8000][1001]; __device__ int start[8000][30]; __device__ int enda[8000][30]; //__device__ int *addr_touched[120][2000]; //__device__ int addr_counter[120]; //__device__ int addr_iteration; __device__ int max_threshold; __device__ __host__ int choose(int n, int k) { int i; int val = 1; if (k >= 0 && k <= n){ for (i=n; i > n-k; i--) val *= i; for (i=2; i <= k; i++) val /= i; } return val; } __device__ __host__ int get_hash_function(int num, int k) { int threshold =2 ; int hash = (int)ceil(pow((float)num/threshold, (float)1.0/k)); if (hash < 1) hash = 1; return hash; } __device__ void gpu_interblock_sync(int goalVal) { int tid = threadIdx.x; if (tid==0) { atomicAdd(&g_barrier,1); //printf("\n%d %d",blockIdx.x,oldVal); } while(g_barrier!=goalVal) { } } __device__ __host__ void form_hash_indx( int *local_hash_index ,int hash_function, int maxitem) { int i, cnt; i=0; printf("\n HASH_FUNCTION = %d", hash_function); if (hash_function == 1) { return; } while(i < maxitem){ for(cnt = 0; i < maxitem && cnt < hash_function; i++) if (local_hash_index[i] == 0){ local_hash_index[i] = cnt; //printf("\n i: %d, val:%d",i,hash_index[i]); cnt++; } for(cnt = hash_function-1;i < maxitem && cnt >= 0; i++) if (local_hash_index[i] == 0){ local_hash_index[i] = cnt; //printf("\n i: %d, val:%d",i,hash_index[i]); cnt--; } } } __device__ int init_subsets(int *starts, int *endas, int num_item, int k_item) { int i; if (num_item < k_item) return 0; for (i=0; i < k_item; i++){ starts[i] = i; endas[i] = num_item - k_item + 1 + i; } return 1; } __device__ int get_next_subset(int *starts, int *endas, int k_item) { int i,j; for (i=k_item-1; i >= 0; i--){ starts[i]++; if (starts[i] < endas[i]){ for (j=i+1; j < k_item; j++) starts[j] = starts[j-1]+1; return 1; } } return 0; } __device__ ListElement* find_in_list(Itemset *element, int sz, ListElement *head, int thread_id) { for(;head; head = head->next()){ Itemset *curr = head->item(); for(int i=0; i < sz; i++){ int it = element->item((start[thread_id])[i]); if (curr->item(i) < it) break; else if (curr->item(i) > it) return NULL; else if (i==sz-1) return head; } } return NULL; } __device__ int not_prune(ListElement *curr, int k, ListElement *beg, int thread_id) { if (k+1 == 3){ start[thread_id][0] = 1; start[thread_id][1] = 2; if ((beg = find_in_list(curr->Item, k, beg, thread_id)) == NULL) return 0; } else{ int res = init_subsets(start[thread_id], enda[thread_id], curr->Item->numitems(), k); start[thread_id][k-2] = curr->Item->numitems()-2; start[thread_id][k-1] = curr->Item->numitems()-1; while (res){ if ((beg = find_in_list(curr->Item, k, beg, thread_id)) == NULL) return 0; res = get_next_subset(start[thread_id], enda[thread_id], k); } } return 1; } __device__ int ass_cnt = 0; __device__ void assign_lsupcnt_offset(HashTree *node, int &val) { if (node->is_leaf()) { 
ListItemset *list = node->list(); /*printf("\n first: %p next: %p",list->first(),list->First->Next); printf("\n last: %p next: %p",list->last(),list->Last->Next); printf("\n numitem: %d",list->numitem); */ if(list && list->first()) { ListElement *iter = list->first(); for(;iter;iter = iter->next()) { iter->item()->set_sup(val++); } } } else { for(int i=0; i < node->hash_function(); i++) if (node->hash_table(i)) { assign_lsupcnt_offset(node->hash_table(i), val); } } } __device__ inline void make_Itemset(Itemset *it, int *buf, int numitem, int tid) { int j; it->set_tid(tid); it->set_numitems(numitem); for (j=0; j < numitem; j++){ it->add_item(j, (int) buf[j]); } } __device__ void apriori_gen(int debug ,int total_threads,int k, int thread_id ,ListElement *tree_listElement, int *tree_listElement_cntr,ListItemset* Largelist, int*gen_check , HashTree * device_HashTree_FreeList,ListElement *temp_listElement) { //int ss,lb,ub,blk,index,counter=0,counter_2=0; int lb,ub,blk; blk = ceil((double)Largelist->numitem/(double)total_threads); if(blk==0) blk=1; if(blk==1 && thread_id>=Largelist->numitem) lb = -1; else { lb = thread_id *blk; } ub = min((thread_id+1)*blk, Largelist->numitem); if(lb>=0 && ub<=Largelist->numitem &&lb<Largelist->numitem) { // if (debug==1) //printf("\nApriori_gen thread_id= %d, blk= %d ,lb= %d, ub= %d, device_Largelist->numitems= %d nblocks= %d intraLock =%d", thread_id,blk, lb, ub, Largelist->numitem,total_threads,intraCTALock); ListElement *L1iter = Largelist->node(lb); for (int i=lb; i < ub && L1iter; i++, L1iter = L1iter->next()) { Itemset *temp = L1iter->item(); //printf("\n thread_id: %d ,i:%d ,item: %d",thread_id,i,temp->item(0)); ListElement *L2iter = L1iter->next(); for(;L2iter; L2iter = L2iter->next()) { if(debug==1) printf("\nbeg of inner loop,thread_id %d",thread_id); Itemset *temp2 = L2iter->item(); if (temp->compare(*temp2,k-2) < 0) break; else { if(debug==1) printf("\nelse part ,thread_id %d",thread_id); int k1 = temp->item(k-2); int k2 = temp2->item(k-2); if (k1 < k2) { // In order to save memory, we will use a temp listElement and if it passes the prune stage // we will allocate an actual listelement from the free list ListElement *it_temp = &temp_listElement[thread_id]; it_temp->Item->set_numitems(k); for (int l=0; l < temp->numitems(); l++) it_temp->Item->add_item(l,temp->item(l)); it_temp->Item->add_item(k-1, k2); ListElement *beg = Largelist->first(); if(debug==1) printf("\n create item list ,thread_id %d",thread_id); if(k==2 || not_prune(it_temp, k-1, beg, thread_id)) { if(debug==1) printf("\n inside prune ,thread_id %d",thread_id); int val = atomicAdd(tree_listElement_cntr,1); ListElement *it = &tree_listElement[val]; it->Item->set_numitems(k); it->Item->support=-1; for (int l=0; l < temp->numitems(); l++) it->Item->add_item(l,temp->item(l)); it->Item->add_item(k-1, k2); it->Item->set_sup(0); if(debug==1) printf("\nApriori_Kernel calls add element,thread_id:%d",thread_id); debug=0; device_Candidate->add_element(debug,it,thread_id,device_HashTree_FreeList); debug=0; if(debug==1) printf("\nApriori_Kernel returns from add element,thread_id:%d",thread_id); } } } } } } } __device__ void increment(int debug,Itemset *trans, ListItemset *Clist, int *tbitvec, int *cnt_ary ) { if(Clist->first()) { ListElement *head = Clist->first(); for(;head; head = head->next()) { Itemset *temp = head->item(); if (temp->subsequence(tbitvec, trans->numitems())) { //atomicAdd(&cnt_ary[temp->sup()],1); atomicAdd(&(temp->support),1); } } } } __device__ void subset(int debug,Itemset 
*trans, int st, int en, int final, HashTree* node, int k, int level, int thread_id, int *tbitvec, int *vist, int hash_function, int *cnt_ary) { int i; (*vist)++; int myvist = *vist; if (node == device_Candidate && node->is_leaf() && node->list() && node->list()->numitem>0) { increment(debug,trans, node->list(), tbitvec,cnt_ary); } else { for(i=st; i < en; i++) { int val = trans->item(i); int hashval = hash_index[val]; if (hashval == -1) continue; if ((device_hash_pos[thread_id])[level*hash_function+hashval] != myvist) { (device_hash_pos[thread_id])[level*hash_function+hashval] = myvist; if (node->hash_table_exists() ) { if(node->hash_table(hashval)) { if (node->hash_table(hashval)->is_leaf() && node->hash_table(hashval)->list() ) { increment(debug,trans, node->hash_table(hashval)->list() , tbitvec,cnt_ary); } else if (en+1 <= final) { subset( debug,trans, i+1, en+1, final,node->hash_table(hashval), k, level+1, thread_id, tbitvec, vist, hash_function,cnt_ary); } } } } } } } __device__ void form_large( int debug, int *device_counter_listElement, ListElement *device_free_listElement, ListItemset *device_Largelist , HashTree *node, int k, int &cnt, int *cntary, int min_sup) { if (node->is_leaf()) { ListItemset *list = node->list(); if(list->numitem>0 && list->first()) { ListElement *iter = list->first(); for(;iter;iter = iter->next()) { int temp_sup = iter->item()->sup() ; //iter->item()->set_sup(cntary[cnt++]); if (iter->item()->sup() >= min_sup) { ListElement *element = &device_free_listElement[*device_counter_listElement]; (*device_counter_listElement)++; element->Next=NULL; element->Item->support = -1; element->Item->copy(iter->item()); device_Largelist->sortedInsert(element, element->item()); /* if(debug==1) { printf("\n Itemset:"); for(int ss=0;ss<k;ss++) { printf("%d ",iter->item()->item(ss)); } }*/ for (int j=0; j < iter->item()->numitems(); j++) { hash_index[iter->item()->item(j)] = 0; } } } } } else{ for(int i=0; i < node->hash_function(); i++) if (node->hash_table(i)) form_large(debug,device_counter_listElement,device_free_listElement,device_Largelist,node->hash_table(i), k, cnt, cntary,min_sup); } } __global__ void Init_Kernel(int hash_function, int maxitem, HashTree *HashTree_FreeList, ListItemset *Largelist, ListElement *Largelist_listElement, int *Largelist_listElement_cntr, int *tree_listElement_cntr, int host_numitem) { int threshold=2; for (int i = 0; i < NUM_HT_FREELIST; i++) HashTree_FreeList_cntr[i] = max_HashFunction * i; device_Candidate = &HashTree_FreeList[HashTree_FreeList_cntr[0]]; HashTree_FreeList_cntr[0] += 1; device_Candidate->HashTree_Init(0, hash_function, threshold); Largelist->First= Largelist_listElement; Largelist->Last= Largelist_listElement + host_numitem-1; Largelist->numitem=host_numitem; *Largelist_listElement_cntr=host_numitem; /*int i; ListElement *temp = Largelist->First; for(i=0;i<host_numitem;i++) { printf("%d %d %d \n",temp->Item->theItemset[0],temp->Item->theItemset[1],temp->Item->support); temp=temp->Next; } *Largelist_listElement_cntr=host_numitem; printf("\n\nelement cntr: %d\n",*Largelist_listElement_cntr ); printf("\n\nelement cntr: %d\n",*tree_listElement_cntr );*/ } __global__ void AprioriGen_Kernel(int debug, int device_k, int total_threads, ListElement *tree_listElement, int *tree_listElement_cntr, ListItemset *Largelist, HashTree *HashTree_FreeList, int *gen_check, ListElement *temp_listElement) { int thread_id = blockDim.x * blockIdx.x + threadIdx.x; debug=0; apriori_gen(debug,total_threads,device_k, 
thread_id,tree_listElement,tree_listElement_cntr,Largelist,gen_check,HashTree_FreeList,temp_listElement); } __global__ void Subset_Kernel( int debug, int device_k, int records_per_thread, int num_trans, int maxitem, transaction *data_set, Itemset *temp_Itemset, int *cnt_ary, ListItemset *Largelist) { int blk,lb,ub,ll,i,vist,j,temp,device_more; int thread_id = blockDim.x * blockIdx.x + threadIdx.x; //if(thread_id==0) //printf("\nSubset_Kernel Start"); blk = records_per_thread; lb = thread_id*blk; ub = min((thread_id+1)*blk, num_trans); vist = 1; device_more = (choose(Largelist->numitem,2)> 0); //printf("\n%d lb=%d ub=%d records_per_thread=%d",thread_id,lb,ub,records_per_thread); for (i=0; i <maxitem; i++){ trans_bitvec[thread_id][i]=0; } temp=((device_k)+1)*device_Candidate->Hash_function; for (ll=0; ll<temp; ll++) device_hash_pos[thread_id][ll]=0; if (device_more) { for(i=lb; i < ub;i++) { //printf("\n thread_id=%d lb=%d ub=%d records_per_thread=%d",thread_id,lb,ub,records_per_thread); make_Itemset(&temp_Itemset[thread_id], data_set[i].item_list, data_set[i].numitem, data_set[i].tid); for (j=0; j < temp_Itemset[thread_id].numitems(); j++) trans_bitvec[thread_id][temp_Itemset[thread_id].item(j)] = 1; subset(debug,&temp_Itemset[thread_id], 0, temp_Itemset[thread_id].numitems()-(device_k)+1,temp_Itemset[thread_id].numitems(), device_Candidate,device_k, 0, thread_id, trans_bitvec[thread_id], &vist, device_Candidate->hash_function(),cnt_ary); for (j=0; j < temp_Itemset[thread_id].numitems(); j++) trans_bitvec[thread_id][temp_Itemset[thread_id].item(j)] = 0; } } if(thread_id==0) { printf("\nSubset_Kernel END"); } } __device__ void print_large(ListItemset *device_Largelist) { for(int jj=0;jj<device_Largelist->numitems();jj++) { ListElement *iter = device_Largelist->node(jj); printf("\n Itemset:"); for(int ss=0;ss<iter->item()->numitems();ss++) { printf("%d ",iter->item()->item(ss)); } printf("\n sup:%d",iter->item()->sup()); } } __global__ void FormLargeList( int debug, int *device_more, int device_k, ListElement *Largelist_listElement, int *Largelist_listElement_cntr, ListItemset *Largelist, int maxitem, int min_sup, int *cnt_ary, HashTree *HashTree_FreeList) { int ccnntt=0,i; int NUM_INSERT,hash_function,threshold=2; //printf("\nLargelist_element_counter: %d",*Largelist_listElement_cntr); *Largelist_listElement_cntr=0; //temp_counter=0; Largelist->numitem=0; Largelist->First=NULL; Largelist->Last=NULL; for(i=0;i<maxitem;i++) hash_index[i]=-1; if(device_k==4) debug=1; form_large(debug,Largelist_listElement_cntr,Largelist_listElement,Largelist,device_Candidate,device_k, ccnntt, cnt_ary,min_sup); printf("\n\n(%d .ITER)it= %d", device_k, Largelist->numitems()); *device_more=(Largelist->numitems()>1); if(*device_more) { print_large(Largelist); NUM_INSERT = choose(Largelist->numitems(),2); hash_function = get_hash_function(NUM_INSERT,device_k+1); printf("\nHash :%d",hash_function); form_hash_indx(hash_index,hash_function,maxitem); for (int i = 0; i < NUM_HT_FREELIST; i++) HashTree_FreeList_cntr[i] = max_HashFunction * i; device_Candidate = &HashTree_FreeList[HashTree_FreeList_cntr[0]]; HashTree_FreeList_cntr[0] += 1; device_Candidate->HashTree_Init(0, hash_function, threshold); } else { print_large(Largelist); } } class stats { public: stats() { total=0; for(int i=0;i<120;i++) transactions[i]=0; } int total; int transactions[120]; }; void CUDA_entrypoint(transaction *host_data_set,int *host_cnt_ary,int records_per_thread,int nBlocks,int nthreads) { int 
*cnt_ary,*tree_listElement_cntr,*Largelist_listElement_cntr,*offsets; int *gen_check,*device_more,*host_offsets; int device_k,k,more,total_threads,i,j,idx,total_combinations,debug,temp_int,offt=0; //int div_const=1024*1024; transaction *data_set; //size_t total,free=0; Itemset *temp_itemset,*temp_itemset_ptr ; void *temp_ptr,*temp_ptr_2; ListItemset *Largelist,*Host_Largelist,*temp_list; ListElement *tree_listElement,*Largelist_listElement,*temp_listElement;; HashTree *HashTree_FreeList, **temp_HashTree; int host_hash_index[1001]; cudaSetDevice(0); cout<<"\nDBASE_NUM_TRANS:"<<DBASE_NUM_TRANS<<" DBASE_MAXITEM:"<<DBASE_MAXITEM<<" DBASE_AVG_TRANS_SZ:"<<DBASE_AVG_TRANS_SZ<<" MIN SUPPORT:"<<MINSUPPORT; cout<<" records_per_thread:"<<records_per_thread<<"\n"; //cudaMemGetInfo(&free,&total); //cout<<"\n!!FREE!!:"<<free/(div_const); offt=0; host_offsets = new int [DBASE_MAXITEM]; for (i=DBASE_MAXITEM-1; i >= 0; i--) { host_offsets[DBASE_MAXITEM-i-1] = offt; offt += i; } // offsets - Used to figure out the index of an candidate set for the count array cutilSafeCall(cudaMalloc(&offsets,(size_t)DBASE_MAXITEM * sizeof(int))); cutilSafeCall(cudaMemcpy(offsets,host_offsets,(size_t)DBASE_MAXITEM * sizeof(int),cudaMemcpyHostToDevice)); // data_set Data set used to store items from DB cutilSafeCall(cudaMalloc(&data_set,(size_t)DBASE_NUM_TRANS * sizeof(transaction))); for(i=0;i<DBASE_NUM_TRANS;i++) { cutilSafeCall(cudaMemcpy(&(data_set[i].tid),&host_data_set[i].tid,sizeof(int),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(&(data_set[i].numitem),&host_data_set[i].numitem,sizeof(int),cudaMemcpyHostToDevice)); temp_int=host_data_set[i].numitem; //allocate space for item array and copy the items into it cutilSafeCall(cudaMalloc(&temp_ptr,(size_t)temp_int *sizeof(int))); cutilSafeCall(cudaMemcpy(&(data_set[i].item_list),&temp_ptr,sizeof(int*),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMemcpy(temp_ptr,host_data_set[i].item_list,(size_t)temp_int*sizeof(int),cudaMemcpyHostToDevice)); } printf("Done 1 "); // cnt_ary Array used to count the support for items in the hash tree total_combinations = DBASE_MAXITEM * (DBASE_MAXITEM -1)/2; // N *N-1/2, given N items it is the max. 
number of combination cutilSafeCall(cudaMalloc (&cnt_ary ,(size_t)(total_combinations)*sizeof(int))); cutilSafeCall(cudaMemcpy(cnt_ary,host_cnt_ary,(total_combinations)*sizeof(int),cudaMemcpyHostToDevice)); // tree_listElement - Allocation mem for listElements used in hash tree cutilSafeCall(cudaMalloc(&(tree_listElement),((size_t)sizeof(ListElement)*max_large_list_size))); for(i=0;i<max_large_list_size;i++) { cutilSafeCall(cudaMalloc(&(temp_itemset),(size_t)sizeof(Itemset))); cutilSafeCall(cudaMemcpy(&(tree_listElement[i].Item),&(temp_itemset),sizeof(Itemset*),cudaMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(cudaMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); cutilSafeCall(cudaMemcpy(temp_itemset,&(temp_ptr),sizeof(int*),cudaMemcpyHostToDevice)); } printf("Done 3 "); // tree_listElement_cntr - Counter for ListElements used in hash tree cutilSafeCall(cudaMalloc(&tree_listElement_cntr,(size_t)sizeof(int))); cutilSafeCall(cudaMemset(tree_listElement_cntr,0,sizeof(int))); // HashTree_FreeList - Allocating memory for the hash tree and the free list counters int * h_HashTree_FreeList_cntr; cutilSafeCall(cudaMalloc(&h_HashTree_FreeList_cntr,sizeof(int) * NUM_HT_FREELIST)); cutilSafeCall(cudaMemcpyToSymbol("HashTree_FreeList_cntr", &h_HashTree_FreeList_cntr, sizeof(int*), 0, cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc(&HashTree_FreeList,(size_t)(sizeof(HashTree)*max_HashFunction*NUM_HT_FREELIST))); size_t total_HashTable_size = max_HashFunction * max_HashFunction * NUM_HT_FREELIST; HashTree ** all_HashTree = NULL; cutilSafeCall(cudaMalloc( &(all_HashTree),(size_t)sizeof(HashTree*)*total_HashTable_size)); cutilSafeCall(cudaMemset(all_HashTree,0,sizeof(HashTree*)*total_HashTable_size)); printf("HashTreeFreeList pointer: %p, max_HashFunction = %d\n", HashTree_FreeList, max_HashFunction); for(i=0;i<max_HashFunction*NUM_HT_FREELIST;i++) { temp_HashTree = &(all_HashTree[i * max_HashFunction]); cutilSafeCall(cudaMemcpy(&(HashTree_FreeList[i].Hash_table),&(temp_HashTree),sizeof(HashTree_FreeList[i].Hash_table),cudaMemcpyHostToDevice)); cutilSafeCall(cudaMalloc(&(temp_list),(size_t)sizeof(ListItemset))); cutilSafeCall(cudaMemcpy( &(HashTree_FreeList[i].List_of_itemsets),&(temp_list),sizeof(ListItemset*),cudaMemcpyHostToDevice)); //Init Values cutilSafeCall(cudaMemset(temp_list,0,sizeof(ListElement*))); cutilSafeCall(cudaMemset(&(temp_list->Last),0,sizeof(ListElement*))); cutilSafeCall(cudaMemset(&(temp_list->numitem),0,sizeof(int))); if (i % 100 == 0) printf("\t progressing %d\n", i); } // set some initial values more=1; k=3; total_threads = nBlocks * nthreads; records_per_thread = ceil((double)DBASE_NUM_TRANS/(double)total_threads); // temp_listElement - allocating listElements which are used in apriori_gen to hold some temp values printf("Allocation: temp_listElement; total_threads = %d\n", total_threads); cutilSafeCall(cudaMalloc(&(temp_listElement),(size_t)sizeof(ListElement)*total_threads)); for(i=0;i<total_threads;i++) { cutilSafeCall(cudaMalloc(&(temp_itemset),(size_t)sizeof(Itemset))); cutilSafeCall(cudaMemcpy(&(temp_listElement[i].Item),&(temp_itemset),sizeof(Itemset*),cudaMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(cudaMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); 
cutilSafeCall(cudaMemcpy(temp_itemset,&(temp_ptr),sizeof(int*),cudaMemcpyHostToDevice)); if (i % 100 == 0) printf("\t progressing %d\n", i); } // temp_itemset - allocating itemsets which are used in subset to hold some temp values printf("Allocation: temp_itemset; total_threads = %d\n", total_threads); cutilSafeCall(cudaMalloc(&(temp_itemset),(size_t)sizeof(Itemset)*total_threads)); for(i=0;i<total_threads;i++) { cutilSafeCall(cudaMalloc(&temp_itemset_ptr,(size_t)sizeof(int)*max_trans_size)); cutilSafeCall(cudaMemcpy(&temp_itemset[i],&temp_itemset_ptr,sizeof(temp_itemset_ptr),cudaMemcpyHostToDevice)); if (i % 100 == 0) printf("\t progressing %d\n", i); } // device_more - outer loop variable cutilSafeCall(cudaMalloc(&device_more,(size_t)sizeof(int))); //device_check_index - used for debugging purposes cutilSafeCall(cudaMalloc(&device_check_index , (size_t)(nBlocks* sizeof(check_indexes)))); // gen_check - used for debugging purposes cutilSafeCall(cudaMalloc(&gen_check,(size_t)sizeof(int)*10000)); //cudaMemGetInfo(&free,&total); //cout<<"\n!!FREE!!:"<<free/(div_const); Host_Largelist = new ListItemset(); for(i=0; i <DBASE_MAXITEM; i++) host_hash_index[i] = -1; for(i=0; i < DBASE_MAXITEM-1; i++){ idx = host_offsets[i]-i-1; for (j=i+1; j < DBASE_MAXITEM; j++) { if (host_cnt_ary[idx+j] >= MINSUPPORT) { host_hash_index[i] = 0; host_hash_index[j] = 0; Itemset *it = new Itemset(2); it->set_numitems(2); it->add_item(0,i); it->add_item(1,j); it->set_sup(host_cnt_ary[idx+j]); ListElement *element = new ListElement(it); Host_Largelist->append(element); } } } ListElement *Host_TempListElement = Host_Largelist->First; int host_numitem = Host_Largelist->numitem; // Largelist_listElemenent - Allocating mem for listElements used in largelist printf("Allocation: Largelist_listelement; max_large_list_size = %d\n", max_large_list_size); cutilSafeCall(cudaMalloc(&(Largelist_listElement),((size_t)sizeof(ListElement)*max_large_list_size))); for(i=0;i<max_large_list_size;i++) { cutilSafeCall(cudaMalloc(&(temp_itemset_ptr),(size_t)sizeof(Itemset))); cutilSafeCall(cudaMemcpy(&(Largelist_listElement[i].Item),&(temp_itemset_ptr),sizeof(Itemset*),cudaMemcpyHostToDevice)); // since the array is the first element of the Itemset class, we can use the location pointed to be temp_itemset to allocate space for this array cutilSafeCall(cudaMalloc(&(temp_ptr),(size_t)sizeof(int)*max_candidate_size)); cutilSafeCall(cudaMemcpy(temp_itemset_ptr,&(temp_ptr),sizeof(int*),cudaMemcpyHostToDevice)); if(i<host_numitem) { cutilSafeCall(cudaMemcpy(temp_ptr,Host_TempListElement->Item->theItemset,sizeof(int)*2,cudaMemcpyHostToDevice)); temp_ptr_2 = &temp_itemset_ptr->theNumel; cutilSafeCall(cudaMemcpy(temp_ptr_2,&Host_TempListElement->Item->theNumel,sizeof(int),cudaMemcpyHostToDevice)); temp_ptr_2 = &temp_itemset_ptr->support; cutilSafeCall(cudaMemcpy(temp_ptr_2,&Host_TempListElement->Item->support,sizeof(int),cudaMemcpyHostToDevice)); Host_TempListElement = Host_TempListElement->Next; } if (i % 100 == 0) printf("\t progressing %d\n", i); } //set the next pointers in the Largelist for(i=0;i<host_numitem;i++) { temp_ptr_2 = &(Largelist_listElement[i+1]); cutilSafeCall(cudaMemcpy(&(Largelist_listElement[i].Next),&(temp_ptr_2),sizeof(ListElement*),cudaMemcpyHostToDevice)); } cutilSafeCall(cudaMemset(&(Largelist_listElement[host_numitem-1].Next),0,sizeof(ListElement*))); // Largelist_listElement_cntr - counter for listElements used in Largelist cutilSafeCall(cudaMalloc(&Largelist_listElement_cntr,(size_t)sizeof(int))); 
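// NOTE: memset writes its value into every *byte*, so the cudaMemset below does not, in
// general, store the integer host_numitem into the counter; the code still behaves
// correctly because Init_Kernel re-initializes *Largelist_listElement_cntr on the device.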
cutilSafeCall(cudaMemset(Largelist_listElement_cntr,host_numitem,sizeof(int))); // Largelist is linked list of candidate sets cutilSafeCall(cudaMalloc(&Largelist,(size_t)sizeof(ListItemset))); int NUM_INSERT = choose(host_numitem,2); int hash_function = get_hash_function(NUM_INSERT,2); form_hash_indx(host_hash_index,hash_function,DBASE_MAXITEM); //copy the host_hash_index to hash_index cutilSafeCall(cudaMemcpyToSymbol(hash_index,&host_hash_index,sizeof(int)*1001)); printf("\n(2.ITER)it= %d", Host_Largelist->numitems()); printf("\nNUM_INSERT: %d",NUM_INSERT); Init_Kernel<<<1,1>>>(hash_function, DBASE_MAXITEM, HashTree_FreeList, Largelist, Largelist_listElement, Largelist_listElement_cntr, tree_listElement_cntr, host_numitem); cout<<"\n Loop Start"; // cudaThreadSetLimit(cudaLimitStackSize,4096); for (k=3;more;k++) { debug=0; device_k=k; //cudaMemset(&g_barrier,0,sizeof(int)); cutilSafeCall(cudaMemset(tree_listElement_cntr,0,sizeof(int))); // if(k==3) // cudaMemset(&addr_iteration,1,sizeof(int)); // else // cudaMemset(&addr_iteration,0,sizeof(int)); AprioriGen_Kernel<<<nBlocks,nthreads>>>(debug, device_k, total_threads, tree_listElement, tree_listElement_cntr, Largelist, HashTree_FreeList, gen_check, temp_listElement); cutilSafeCall(cudaMemset(cnt_ary,0,(total_combinations)*sizeof(int))); Subset_Kernel<<<nBlocks,nthreads>>>(debug, device_k, records_per_thread, DBASE_NUM_TRANS, DBASE_MAXITEM, data_set, temp_itemset, cnt_ary, Largelist); cudaThreadSynchronize(); FormLargeList<<<1,1>>>( debug, device_more, device_k, Largelist_listElement, Largelist_listElement_cntr, Largelist, DBASE_MAXITEM, MINSUPPORT, cnt_ary, HashTree_FreeList); cudaThreadSynchronize(); cutilSafeCall(cudaMemcpy(&more,device_more,sizeof(int),cudaMemcpyDeviceToHost)); printf("Cleanup HashTreeFreeList Hash_table\n"); cutilSafeCall(cudaMemset(all_HashTree, 0, sizeof(HashTree*)*total_HashTable_size)); } cudaThreadSynchronize(); /*****************CACHE LINES TOUCHED BY TANSACTIONS **************************************************************/ // int cache_line = 128; // bytes // int *host_addr_touched[120][2000],host_addr_counter[120]; // unsigned long key; // // std::map<unsigned long,stats>cacheline_trans; // // for(i=0;i<120;i++) // { // cutilSafeCall(cudaMemcpy(host_addr_touched[i],addr_touched[i],2000*sizeof(int*),cudaMemcpyDeviceToHost)); // } // cutilSafeCall(cudaMemcpy(host_addr_counter,addr_counter,120*sizeof(int),cudaMemcpyDeviceToHost)); // // printf("First memory locations: "); // for(i=0;i<120;i++) // printf(" %p",host_addr_touched[i][0]); // // for(i=0;i<120;i++) // { // for(j=0;j<2000;j++) // { // key=(unsigned long)host_addr_touched[i][j]; // // if( (key%cache_line) == 0 ) // Start of Cache Line // { // // cacheline_trans[key].total++; // cacheline_trans[key].transactions[i]=1; // // } // else // { // key = (key-(key%128));// find the start of the cache line // cacheline_trans[key].total++; // cacheline_trans[key].transactions[i]=1; // } // } // } /*******************************************************************************************************************/ /* FILE *fp = fopen("cuda_elements_gen","w"); host_gen_check =(int *)calloc(10000,sizeof(int)); cutilSafeCall(cudaMemcpy(host_gen_check,gen_check,10000*sizeof(int),cudaMemcpyDeviceToHost)); fprintf(fp,"\n"); int kk=0; for(i=0;i<10000;i++) { if(host_gen_check[i]>0 && host_gen_check[i]<1000) { fprintf(fp,"%d ",host_gen_check[i]); kk+=1; if(kk==3) { fprintf(fp,"\n"); kk=0; } } } */ /* check_index = (check_indexes*)calloc(nBlocks,sizeof(check_indexes)); 
cutilSafeCall(cudaMemcpy(check_index,device_check_index,nBlocks*sizeof(check_indexes),cudaMemcpyDeviceToHost)); printf("\nIndexes: "); for(i=0;i<nBlocks;i++) { printf("\nidx: %d",check_index[i].idx); printf("\nlb: %d",check_index[i].lb); printf("\nub: %d",check_index[i].ub); printf("\nblk: %d",check_index[i].blk); printf("\nm: %d",check_index[i].m); printf("\n"); } */ /* host_cnt_ary=(int*)calloc(total_combinations,sizeof(int)); cutilSafeCall(cudaMemcpy(host_cnt_ary,cnt_ary,(total_combinations)*sizeof(int),cudaMemcpyDeviceToHost)); FILE *output_file_count; output_file_count= fopen("cuda_output_count.txt","w"); for(i=0;i<2867;i++) { fprintf(output_file_count,"\n%d",host_cnt_ary[i]); } fclose(output_file_count);*/ /*FILE *output_file; output_file=fopen("cuda_output_data.txt","w"); for(i=0;i<DBASE_NUM_TRANS;i++) { // fprintf(output_file,"%d %d",host_data_set[i].tid,host_data_set[i].numitem); for(j=0;j<host_data_set[i].numitem;j++) { int tmp_index = host_item_offsets[i]+ j; fprintf(output_file,"%d ",host_item_array[tmp_index]); } fprintf(output_file,"\n"); } fprintf(output_file,"\n"); fclose(output_file);*/ printf("\nDONE\n"); }
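/* A minimal sketch (not from either file above) of the triangular indexing that
   host_offsets and cnt_ary implement: the support count of the item pair (i, j),
   i < j, lives at cnt_ary[host_offsets[i] - i - 1 + j]. Demonstrated on the CPU
   with a hypothetical item count N: */
#include <stdio.h>

int main(void)
{
    enum { N = 5 };                        /* hypothetical item count */
    int offsets[N], offt = 0;
    for (int i = N - 1; i >= 0; i--) {     /* same construction as host_offsets */
        offsets[N - i - 1] = offt;
        offt += i;
    }
    /* the flat indices enumerate exactly 0 .. N*(N-1)/2 - 1 */
    for (int i = 0; i < N - 1; i++)
        for (int j = i + 1; j < N; j++)
            printf("pair (%d,%d) -> cnt_ary[%d]\n", i, j, offsets[i] - i - 1 + j);
    return 0;
}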
d2756fa4ab4a67c5dbe895f38cca806d9c58383c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHANNELS 3
#define in_COMPONENT_COLOR 255
#define RGB_COMPONENT_COLOR 255
#define HALF_WIDTH 4

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// Median-filter kernel: each thread gathers the n*n window (n = 2*sz+1) around its
// pixel from the padded image, bubble-sorts each colour channel independently, and
// writes the per-channel median. n*n must stay <= 51, i.e. sz <= 3.
__global__ void colCon(unsigned char *outImg, unsigned char *inImage, int width, int height, int pwidth, int sz)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int n = sz * 2 + 1;
    int r[51], g[51], b[51];

    if (x < width && y < height) {
        int idxI = (y + sz) * pwidth + (x + sz);   // centre pixel in the padded image
        int filIdx = idxI;
        idxI = idxI - (pwidth * sz + sz);          // top-left corner of the window
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                int idx = i * n + j;
                r[idx] = (int)inImage[CHANNELS * idxI];
                g[idx] = (int)inImage[CHANNELS * idxI + 1];
                b[idx] = (int)inImage[CHANNELS * idxI + 2];
                idxI++;
            }
            idxI += pwidth - n;                    // advance to the next row of the window
        }
        // bubble sort each channel so the median ends up at index (n*n)/2
        for (int i = 0; i < n * n - 1; i++) {
            for (int j = 0; j < n * n - i - 1; j++) {
                if (r[j] > r[j + 1]) { int temp = r[j]; r[j] = r[j + 1]; r[j + 1] = temp; }
                if (g[j] > g[j + 1]) { int temp = g[j]; g[j] = g[j + 1]; g[j + 1] = temp; }
                if (b[j] > b[j + 1]) { int temp = b[j]; b[j] = b[j + 1]; b[j + 1] = temp; }
            }
        }
        outImg[CHANNELS * filIdx]     = (unsigned char)r[(n * n) / 2];
        outImg[CHANNELS * filIdx + 1] = (unsigned char)g[(n * n) / 2];
        outImg[CHANNELS * filIdx + 2] = (unsigned char)b[(n * n) / 2];
    }
}

static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    //check the image format
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    //alloc memory for image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //skip any comment lines
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    //check rgb component depth
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    //skip to the end of the header line
    while (fgetc(fp) != '\n')
        ;
    //memory allocation for pixel data
    img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //read pixel data from file
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

void writePPM(const char *filename, PPMImage *img)
{
    FILE *fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //write the header: image format, image size, rgb component depth
    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n", img->x, img->y);
    fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
    //pixel data
    fwrite(img->data, 3 * img->x, img->y, fp);
    fclose(fp);
}

// Pad the image by `size` pixels on every side, replicating the border pixels.
PPMImage *padImage(PPMImage *img, int size)
{
    PPMImage *paddedImg = (PPMImage *)malloc(sizeof(PPMImage));
    int h = (img->y + 2 * size);
    int w = (img->x + 2 * size);
    printf("size: %d, h: %d, w: %d\n", size, h, w);
    paddedImg->data = (PPMPixel *)malloc(h * w * sizeof(PPMPixel));
    paddedImg->x = w;
    paddedImg->y = h;
    memset(paddedImg->data, 0, h * w * sizeof(PPMPixel));
    printf("Image Initialized\n");
    //copy the original image into the interior
    for (int i = 0; i < img->y; i++) {
        for (int j = 0; j < img->x; j++) {
            int idxP = (i + size) * w + (j + size);
            int idx  = i * img->x + j;
            paddedImg->data[idxP] = img->data[idx];
        }
    }
    //left border: replicate the first interior column
    for (int j = 0; j < size; j++) {
        for (int i = 0; i < paddedImg->y; i++) {
            paddedImg->data[i * w + j] = paddedImg->data[i * w + size];
        }
    }
    //right border: replicate the last interior column of the same row
    for (int j = paddedImg->x - size; j < paddedImg->x; j++) {
        for (int i = 0; i < paddedImg->y; i++) {
            paddedImg->data[i * w + j] = paddedImg->data[i * w + w - size - 1];
        }
    }
    //top border: replicate the first interior row
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < paddedImg->x; j++) {
            paddedImg->data[i * w + j] = paddedImg->data[size * w + j];
        }
    }
    //bottom border: replicate the last interior row
    for (int i = paddedImg->y - size; i < paddedImg->y; i++) {
        for (int j = 0; j < paddedImg->x; j++) {
            paddedImg->data[i * w + j] = paddedImg->data[(paddedImg->y - size - 1) * w + j];
        }
    }
    return paddedImg;
}

int main(int argc, char **argv)
{
    PPMImage *image, *filtered, *padedImg;
    unsigned char *in, *out;
    int bytes, sz;
    float elapsed;

    if (argc < 3) {
        fprintf(stderr, "usage: %s <half-window-size> <input.ppm>\n", argv[0]);
        return 1;
    }
    sz = atoi(argv[1]);
    if (sz < 0 || (2 * sz + 1) * (2 * sz + 1) > 51) {
        fprintf(stderr, "sz must be in [0,3] so the window fits the kernel's 51-entry sort buffers\n");
        return 1;
    }
    printf("Size Initialized\n");
    image = readPPM(argv[2]);
    printf("Before padding: h: %d w: %d\n", image->y, image->x);
    padedImg = padImage(image, sz);
    bytes = padedImg->x * padedImg->y * 3;
    printf("After padding: h: %d w: %d\n", padedImg->y, padedImg->x);

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipMalloc(&in,  bytes * sizeof(unsigned char));
    hipMalloc(&out, bytes * sizeof(unsigned char));
    hipMemcpy(in, padedImg->data, bytes, hipMemcpyHostToDevice);
    hipMemset(out, 0, bytes);   // the kernel never writes the padded border; keep it deterministic

    dim3 gridSize((padedImg->x - 1) / 16 + 1, (padedImg->y - 1) / 16 + 1, 1);
    dim3 blockSize(16, 16, 1);

    hipEventRecord(start);
    hipLaunchKernelGGL(colCon, dim3(gridSize), dim3(blockSize), 0, 0, out, in, image->x, image->y, padedImg->x, sz);
    hipEventRecord(stop);
    hipDeviceSynchronize();
    hipEventElapsedTime(&elapsed, start, stop);
    printf(",%f\n", elapsed);

    filtered = (PPMImage *)malloc(sizeof(PPMImage));
    filtered->x = padedImg->x;
    filtered->y = padedImg->y;
    filtered->data = (PPMPixel *)malloc(padedImg->x * padedImg->y * sizeof(PPMPixel));
    hipMemcpy(filtered->data, out, bytes, hipMemcpyDeviceToHost);
    writePPM("medianOutput.ppm", filtered);

    hipFree(in);
    hipFree(out);
    return 0;
}
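/* A possible build-and-run line for the file above, assuming hipcc and a P6 PPM
   input (the file and image names are placeholders, not from the original source):
       hipcc median_filter.hip -o median_filter
       ./median_filter 3 input.ppm        # 7x7 median, writes medianOutput.ppm
   For spot-checking the kernel, a minimal CPU reference for one output pixel,
   assuming the same padded layout and channel order as colCon (the helper names
   cmp_int / median_at are placeholders): */
#include <stdlib.h>

static int cmp_int(const void *a, const void *b)
{
    return *(const int *)a - *(const int *)b;
}

/* median of channel ch (0=r,1=g,2=b) of the window centred at original pixel (x, y) */
static int median_at(const unsigned char *padded, int pwidth, int x, int y, int sz, int ch)
{
    int n = 2 * sz + 1, vals[49], k = 0;   /* 49 = 7*7 samples, matching sz <= 3 */
    for (int dy = -sz; dy <= sz; dy++)
        for (int dx = -sz; dx <= sz; dx++)
            vals[k++] = padded[3 * ((y + sz + dy) * pwidth + (x + sz + dx)) + ch];
    qsort(vals, n * n, sizeof(int), cmp_int);
    return vals[(n * n) / 2];
}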
d2756fa4ab4a67c5dbe895f38cca806d9c58383c.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define CHANNELS 3
#define in_COMPONENT_COLOR 255
#define RGB_COMPONENT_COLOR 255
#define HALF_WIDTH 4

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// Median-filter kernel: each thread gathers the n*n window (n = 2*sz+1) around its
// pixel from the padded image, bubble-sorts each colour channel independently, and
// writes the per-channel median. n*n must stay <= 51, i.e. sz <= 3.
__global__ void colCon(unsigned char *outImg, unsigned char *inImage, int width, int height, int pwidth, int sz)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int n = sz * 2 + 1;
    int r[51], g[51], b[51];

    if (x < width && y < height) {
        int idxI = (y + sz) * pwidth + (x + sz);   // centre pixel in the padded image
        int filIdx = idxI;
        idxI = idxI - (pwidth * sz + sz);          // top-left corner of the window
        for (int i = 0; i < n; i++) {
            for (int j = 0; j < n; j++) {
                int idx = i * n + j;
                r[idx] = (int)inImage[CHANNELS * idxI];
                g[idx] = (int)inImage[CHANNELS * idxI + 1];
                b[idx] = (int)inImage[CHANNELS * idxI + 2];
                idxI++;
            }
            idxI += pwidth - n;                    // advance to the next row of the window
        }
        // bubble sort each channel so the median ends up at index (n*n)/2
        for (int i = 0; i < n * n - 1; i++) {
            for (int j = 0; j < n * n - i - 1; j++) {
                if (r[j] > r[j + 1]) { int temp = r[j]; r[j] = r[j + 1]; r[j + 1] = temp; }
                if (g[j] > g[j + 1]) { int temp = g[j]; g[j] = g[j + 1]; g[j + 1] = temp; }
                if (b[j] > b[j + 1]) { int temp = b[j]; b[j] = b[j + 1]; b[j + 1] = temp; }
            }
        }
        outImg[CHANNELS * filIdx]     = (unsigned char)r[(n * n) / 2];
        outImg[CHANNELS * filIdx + 1] = (unsigned char)g[(n * n) / 2];
        outImg[CHANNELS * filIdx + 2] = (unsigned char)b[(n * n) / 2];
    }
}

static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    //check the image format
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    //alloc memory for image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //skip any comment lines
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    //check rgb component depth
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    //skip to the end of the header line
    while (fgetc(fp) != '\n')
        ;
    //memory allocation for pixel data
    img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //read pixel data from file
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

void writePPM(const char *filename, PPMImage *img)
{
    FILE *fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //write the header: image format, image size, rgb component depth
    fprintf(fp, "P6\n");
    fprintf(fp, "%d %d\n", img->x, img->y);
    fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
    //pixel data
    fwrite(img->data, 3 * img->x, img->y, fp);
    fclose(fp);
}

// Pad the image by `size` pixels on every side, replicating the border pixels.
PPMImage *padImage(PPMImage *img, int size)
{
    PPMImage *paddedImg = (PPMImage *)malloc(sizeof(PPMImage));
    int h = (img->y + 2 * size);
    int w = (img->x + 2 * size);
    printf("size: %d, h: %d, w: %d\n", size, h, w);
    paddedImg->data = (PPMPixel *)malloc(h * w * sizeof(PPMPixel));
    paddedImg->x = w;
    paddedImg->y = h;
    memset(paddedImg->data, 0, h * w * sizeof(PPMPixel));
    printf("Image Initialized\n");
    //copy the original image into the interior
    for (int i = 0; i < img->y; i++) {
        for (int j = 0; j < img->x; j++) {
            int idxP = (i + size) * w + (j + size);
            int idx  = i * img->x + j;
            paddedImg->data[idxP] = img->data[idx];
        }
    }
    //left border: replicate the first interior column
    for (int j = 0; j < size; j++) {
        for (int i = 0; i < paddedImg->y; i++) {
            paddedImg->data[i * w + j] = paddedImg->data[i * w + size];
        }
    }
    //right border: replicate the last interior column of the same row
    for (int j = paddedImg->x - size; j < paddedImg->x; j++) {
        for (int i = 0; i < paddedImg->y; i++) {
            paddedImg->data[i * w + j] = paddedImg->data[i * w + w - size - 1];
        }
    }
    //top border: replicate the first interior row
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < paddedImg->x; j++) {
            paddedImg->data[i * w + j] = paddedImg->data[size * w + j];
        }
    }
    //bottom border: replicate the last interior row
    for (int i = paddedImg->y - size; i < paddedImg->y; i++) {
        for (int j = 0; j < paddedImg->x; j++) {
            paddedImg->data[i * w + j] = paddedImg->data[(paddedImg->y - size - 1) * w + j];
        }
    }
    return paddedImg;
}

int main(int argc, char **argv)
{
    PPMImage *image, *filtered, *padedImg;
    unsigned char *in, *out;
    int bytes, sz;
    float elapsed;

    if (argc < 3) {
        fprintf(stderr, "usage: %s <half-window-size> <input.ppm>\n", argv[0]);
        return 1;
    }
    sz = atoi(argv[1]);
    if (sz < 0 || (2 * sz + 1) * (2 * sz + 1) > 51) {
        fprintf(stderr, "sz must be in [0,3] so the window fits the kernel's 51-entry sort buffers\n");
        return 1;
    }
    printf("Size Initialized\n");
    image = readPPM(argv[2]);
    printf("Before padding: h: %d w: %d\n", image->y, image->x);
    padedImg = padImage(image, sz);
    bytes = padedImg->x * padedImg->y * 3;
    printf("After padding: h: %d w: %d\n", padedImg->y, padedImg->x);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaMalloc(&in,  bytes * sizeof(unsigned char));
    cudaMalloc(&out, bytes * sizeof(unsigned char));
    cudaMemcpy(in, padedImg->data, bytes, cudaMemcpyHostToDevice);
    cudaMemset(out, 0, bytes);   // the kernel never writes the padded border; keep it deterministic

    dim3 gridSize((padedImg->x - 1) / 16 + 1, (padedImg->y - 1) / 16 + 1, 1);
    dim3 blockSize(16, 16, 1);

    cudaEventRecord(start);
    colCon<<<gridSize, blockSize>>>(out, in, image->x, image->y, padedImg->x, sz);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaEventElapsedTime(&elapsed, start, stop);
    printf(",%f\n", elapsed);

    filtered = (PPMImage *)malloc(sizeof(PPMImage));
    filtered->x = padedImg->x;
    filtered->y = padedImg->y;
    filtered->data = (PPMPixel *)malloc(padedImg->x * padedImg->y * sizeof(PPMPixel));
    cudaMemcpy(filtered->data, out, bytes, cudaMemcpyDeviceToHost);
    writePPM("medianOutput.ppm", filtered);

    cudaFree(in);
    cudaFree(out);
    return 0;
}
72060bbdb7f103e5b9b518a495eaa3936390b872.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* **********************************************
 * CS314 Principles of Programming Languages    *
 * Spring 2020                                  *
 ********************************************** */
#include <stdio.h>
#include <stdlib.h>

__global__ void exclusive_prefix_sum_gpu(int *oldSum, int *newSum, int distance, int numElements)
{
  int numThreads = blockDim.x * gridDim.x;         // total number of threads
  int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
  int i = 0;
  /* this grid-stride loop automatically covers every element, as long as each
     array access inside it uses [i] */
  for (i = tid; i < numElements; i += numThreads) { // strictly less than, or the last thread writes out of bounds
    // since this is an exclusive prefix sum, a distance of 0 means every element
    // of the output should be set to the previous element of the input
    if (distance == 0) {
      if (i == 0) {
        // the first element has no predecessor, so it gets the identity
        newSum[i] = 0;
      } else {
        // shift the input right by one to make the scan exclusive
        newSum[i] = oldSum[i - 1];
      }
    } else {
      // distance/stride != 0, we start adding
      if (i >= distance) {
        // first make sure we don't read array indexes less than 0
        newSum[i] = oldSum[i] + oldSum[i - distance]; // the actual scan step
      } else {
        // elements before the stride are already final; copy them through
        newSum[i] = oldSum[i];
      }
    }
  }
}
72060bbdb7f103e5b9b518a495eaa3936390b872.cu
/* **********************************************
 * CS314 Principles of Programming Languages    *
 * Spring 2020                                  *
 ********************************************** */
#include <stdio.h>
#include <stdlib.h>

__global__ void exclusive_prefix_sum_gpu(int *oldSum, int *newSum, int distance, int numElements)
{
  int numThreads = blockDim.x * gridDim.x;         // total number of threads
  int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
  int i = 0;
  /* this grid-stride loop automatically covers every element, as long as each
     array access inside it uses [i] */
  for (i = tid; i < numElements; i += numThreads) { // strictly less than, or the last thread writes out of bounds
    // since this is an exclusive prefix sum, a distance of 0 means every element
    // of the output should be set to the previous element of the input
    if (distance == 0) {
      if (i == 0) {
        // the first element has no predecessor, so it gets the identity
        newSum[i] = 0;
      } else {
        // shift the input right by one to make the scan exclusive
        newSum[i] = oldSum[i - 1];
      }
    } else {
      // distance/stride != 0, we start adding
      if (i >= distance) {
        // first make sure we don't read array indexes less than 0
        newSum[i] = oldSum[i] + oldSum[i - distance]; // the actual scan step
      } else {
        // elements before the stride are already final; copy them through
        newSum[i] = oldSum[i];
      }
    }
  }
}
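Both copies of this kernel leave the host-side iteration implicit: one distance-0 pass makes the scan exclusive, then the distance doubles each pass (Hillis-Steele style) while two buffers ping-pong so a pass never reads what it just wrote. A minimal driver sketch under those assumptions; the function and buffer names here are illustrative, not part of the assignment:

// Hypothetical host-side driver for exclusive_prefix_sum_gpu.
#include <cuda_runtime.h>

int *exclusive_prefix_sum(int *d_bufA, int *d_bufB, int numElements)
{
  int threads = 256;
  int blocks = (numElements + threads - 1) / threads;

  // distance == 0: make the scan exclusive by shifting everything right by one
  exclusive_prefix_sum_gpu<<<blocks, threads>>>(d_bufA, d_bufB, 0, numElements);
  int *in = d_bufB, *out = d_bufA; // ping-pong: `in` always holds the newest partial result

  // each pass adds the element `distance` slots back; ~log2(numElements) passes total
  for (int distance = 1; distance < numElements; distance *= 2) {
    exclusive_prefix_sum_gpu<<<blocks, threads>>>(in, out, distance, numElements);
    int *tmp = in; in = out; out = tmp;
  }
  cudaDeviceSynchronize();
  return in; // after the final swap, `in` points at the buffer written last
}

After the doubling passes finish, every element has accumulated all of its predecessors, so the buffer written last holds the complete exclusive scan.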
83a2cac02d184cac8fa21abf8a1e44ee1f503d09.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Test of WarpScan utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <typeinfo> #include <cub/warp/warp_scan.cuh> #include <hipcub/hipcub.hpp> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- static const int NUM_WARPS = 2; bool g_verbose = false; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); /** * Primitive variant to test */ enum TestMode { BASIC, AGGREGATE, }; /** * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) */ template<typename OpT> struct WrapperFunctor { OpT op; WrapperFunctor(OpT op) : op(op) {} template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return op(a, b); } }; //--------------------------------------------------------------------- // Test kernels //--------------------------------------------------------------------- /// Exclusive scan basic template <typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.ExclusiveScan(data, data, initial_value, scan_op); } /// Exclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> 
__device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveScan(data, data, initial_value, scan_op, aggregate); } /// Exclusive sum basic template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.ExclusiveSum(data, data); } /// Exclusive sum aggregate template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveSum(data, data, aggregate); } /// Inclusive scan basic template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.InclusiveScan(data, data, scan_op); } /// Inclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveScan(data, data, scan_op, aggregate); } /// Inclusive sum basic template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum &scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.InclusiveSum(data, data); } /// Inclusive sum aggregate template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveSum(data, data, aggregate); } /** * WarpScan test kernel */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> __global__ void WarpScanKernel( T *d_in, T *d_out, T *d_aggregate, ScanOpT scan_op, InitialValueT initial_value, clock_t *d_elapsed) { // Cooperative warp-scan utility type (1 warp) typedef WarpScan<T, LOGICAL_WARP_THREADS> WarpScanT; // Allocate temp storage in shared memory __shared__ typename WarpScanT::TempStorage temp_storage[NUM_WARPS]; // Get warp index int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; // Per-thread tile data T data = d_in[threadIdx.x]; // Start cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t start = clock(); __threadfence_block(); // workaround to prevent clock hoisting T aggregate; // Test scan WarpScanT warp_scan(temp_storage[warp_id]); DeviceTest( warp_scan, data, initial_value, scan_op, aggregate, Int2Type<TEST_MODE>(), Int2Type<Traits<T>::PRIMITIVE>()); // Stop cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t stop = clock(); __threadfence_block(); // workaround to prevent clock hoisting // 
Store data d_out[threadIdx.x] = data; if (TEST_MODE != BASIC) { // Store aggregate d_aggregate[threadIdx.x] = aggregate; } // Store time if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? start - stop : stop - start; } } //--------------------------------------------------------------------- // Host utility subroutines //--------------------------------------------------------------------- /** * Initialize exclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, T initial_value, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; h_reference[i] = initial_value; T inclusive = scan_op(initial_value, h_in[i]); for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); h_reference[i] = inclusive; inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); } warp_aggregates[w] = warp_aggregate; } } /** * Initialize inclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, NullType, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; T inclusive = h_in[i]; h_reference[i] = inclusive; for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); h_reference[i] = inclusive; } warp_aggregates[w] = warp_aggregate; } } /** * Test warp scan */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> // NullType implies inclusive-scan, otherwise inclusive scan void Test( GenMode gen_mode, ScanOpT scan_op, InitialValueT initial_value) { enum { TOTAL_ITEMS = LOGICAL_WARP_THREADS * NUM_WARPS, }; // Allocate host arrays T *h_in = new T[TOTAL_ITEMS]; T *h_reference = new T[TOTAL_ITEMS]; T *h_aggregate = new T[TOTAL_ITEMS]; // Initialize problem T aggregates[NUM_WARPS]; Initialize( gen_mode, h_in, h_reference, LOGICAL_WARP_THREADS, scan_op, initial_value, aggregates); if (g_verbose) { printf("Input: \n"); DisplayResults(h_in, TOTAL_ITEMS); printf("\n"); } for (int w = 0; w < NUM_WARPS; ++w) { for (int i = 0; i < LOGICAL_WARP_THREADS; ++i) { h_aggregate[(w * LOGICAL_WARP_THREADS) + i] = aggregates[w]; } } // Initialize/clear device arrays T *d_in = NULL; T *d_out = NULL; T *d_aggregate = NULL; clock_t *d_elapsed = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * TOTAL_ITEMS, hipMemcpyHostToDevice)); CubDebugExit(hipMemset(d_out, 0, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(hipMemset(d_aggregate, 0, sizeof(T) * TOTAL_ITEMS)); // Run kernel printf("Test-mode %d (%s), gen-mode %d (%s), %s warpscan, %d warp threads, %s (%d bytes) elements:\n", TEST_MODE, typeid(TEST_MODE).name(), gen_mode, typeid(gen_mode).name(), 
(Equals<InitialValueT, NullType>::VALUE) ? "Inclusive" : "Exclusive", LOGICAL_WARP_THREADS, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Run aggregate/prefix kernel hipLaunchKernelGGL(( WarpScanKernel<LOGICAL_WARP_THREADS, TEST_MODE>), dim3(1), dim3(TOTAL_ITEMS), 0, 0, d_in, d_out, d_aggregate, scan_op, initial_value, d_elapsed); printf("\tElapsed clocks: "); DisplayDeviceResults(d_elapsed, 1); CubDebugExit(hipPeekAtLastError()); CubDebugExit(hipDeviceSynchronize()); // Copy out and display results printf("\tScan results: "); int compare = CompareDeviceResults(h_reference, d_out, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Copy out and display aggregate if (TEST_MODE == AGGREGATE) { printf("\tScan aggregate: "); compare = CompareDeviceResults(h_aggregate, d_aggregate, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); } // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_aggregate) delete[] h_aggregate; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); } /** * Run battery of tests for different primitive variants */ template < int LOGICAL_WARP_THREADS, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { // Exclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, T()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, T()); // Exclusive (non-specialized, so we can use initial-value) Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); // Inclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, NullType()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, NullType()); } /** * Run battery of tests for different data types and scan ops */ template <int LOGICAL_WARP_THREADS> void Test(GenMode gen_mode) { // Get device ordinal int device_ordinal; CubDebugExit(hipGetDevice(&device_ordinal)); // Get ptx version int ptx_version; CubDebugExit(PtxVersion(ptx_version)); // primitive Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long long) 99); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (float) 99); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (double) 99); } // primitive (alternative scan op) Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned long long) 99); // vec-2 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uchar2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ushort2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uint2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulong2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulonglong2(17, 21)); if (gen_mode != RANDOM) { // Only test 
numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_float2(17, 21)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double2(17, 21)); } // vec-4 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_char4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_short4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_int4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_long4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_longlong4(17, 21, 32, 85)); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_float4(17, 21, 32, 85)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double4(17, 21, 32, 85)); } // complex Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestBar(17, 21)); } /** * Run battery of tests for different problem generation options */ template <int LOGICAL_WARP_THREADS> void Test() { Test<LOGICAL_WARP_THREADS>(UNIFORM); Test<LOGICAL_WARP_THREADS>(INTEGER_SEED); Test<LOGICAL_WARP_THREADS>(RANDOM); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #ifdef QUICK_TEST // Compile/run quick tests Test<32, AGGREGATE, int>(UNIFORM, Sum(), (int) 0); Test<32, AGGREGATE, float>(UNIFORM, Sum(), (float) 0); Test<32, AGGREGATE, long long>(UNIFORM, Sum(), (long long) 0); Test<32, AGGREGATE, double>(UNIFORM, Sum(), (double) 0); typedef KeyValuePair<int, float> T; hipcub::Sum sum_op; Test<32, AGGREGATE, T>(UNIFORM, ReduceBySegmentOp<hipcub::Sum>(sum_op), T()); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test logical warp sizes Test<32>(); Test<16>(); Test<9>(); Test<2>(); } #endif return 0; }
83a2cac02d184cac8fa21abf8a1e44ee1f503d09.cu
/****************************************************************************** * Copyright (c) 2011, Duane Merrill. All rights reserved. * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ******************************************************************************/ /****************************************************************************** * Test of WarpScan utilities ******************************************************************************/ // Ensure printing of CUDA runtime errors to console #define CUB_STDERR #include <stdio.h> #include <typeinfo> #include <cub/warp/warp_scan.cuh> #include <cub/util_allocator.cuh> #include "test_util.h" using namespace cub; //--------------------------------------------------------------------- // Globals, constants and typedefs //--------------------------------------------------------------------- static const int NUM_WARPS = 2; bool g_verbose = false; int g_repeat = 0; CachingDeviceAllocator g_allocator(true); /** * Primitive variant to test */ enum TestMode { BASIC, AGGREGATE, }; /** * \brief WrapperFunctor (for precluding test-specialized dispatch to *Sum variants) */ template<typename OpT> struct WrapperFunctor { OpT op; WrapperFunctor(OpT op) : op(op) {} template <typename T> __host__ __device__ __forceinline__ T operator()(const T &a, const T &b) const { return op(a, b); } }; //--------------------------------------------------------------------- // Test kernels //--------------------------------------------------------------------- /// Exclusive scan basic template <typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.ExclusiveScan(data, data, initial_value, scan_op); } /// Exclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T 
&initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveScan(data, data, initial_value, scan_op, aggregate); } /// Exclusive sum basic template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.ExclusiveSum(data, data); } /// Exclusive sum aggregate template < typename WarpScanT, typename T> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, T &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.ExclusiveSum(data, data, aggregate); } /// Inclusive scan basic template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<BASIC> test_mode, IsPrimitiveT is_primitive) { // Test basic warp scan warp_scan.InclusiveScan(data, data, scan_op); } /// Inclusive scan aggregate template < typename WarpScanT, typename T, typename ScanOpT, typename IsPrimitiveT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, ScanOpT &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, IsPrimitiveT is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveScan(data, data, scan_op, aggregate); } /// Inclusive sum basic template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum &scan_op, T &aggregate, Int2Type<BASIC> test_mode, Int2Type<true> is_primitive) { // Test basic warp scan warp_scan.InclusiveSum(data, data); } /// Inclusive sum aggregate template < typename WarpScanT, typename T, typename InitialValueT> __device__ __forceinline__ void DeviceTest( WarpScanT &warp_scan, T &data, NullType &initial_value, Sum &scan_op, T &aggregate, Int2Type<AGGREGATE> test_mode, Int2Type<true> is_primitive) { // Test with cumulative aggregate warp_scan.InclusiveSum(data, data, aggregate); } /** * WarpScan test kernel */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> __global__ void WarpScanKernel( T *d_in, T *d_out, T *d_aggregate, ScanOpT scan_op, InitialValueT initial_value, clock_t *d_elapsed) { // Cooperative warp-scan utility type (1 warp) typedef WarpScan<T, LOGICAL_WARP_THREADS> WarpScanT; // Allocate temp storage in shared memory __shared__ typename WarpScanT::TempStorage temp_storage[NUM_WARPS]; // Get warp index int warp_id = threadIdx.x / LOGICAL_WARP_THREADS; // Per-thread tile data T data = d_in[threadIdx.x]; // Start cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t start = clock(); __threadfence_block(); // workaround to prevent clock hoisting T aggregate; // Test scan WarpScanT warp_scan(temp_storage[warp_id]); DeviceTest( warp_scan, data, initial_value, scan_op, aggregate, Int2Type<TEST_MODE>(), Int2Type<Traits<T>::PRIMITIVE>()); // Stop cycle timer __threadfence_block(); // workaround to prevent clock hoisting clock_t stop = clock(); __threadfence_block(); // workaround to prevent clock hoisting // Store data d_out[threadIdx.x] = data; if (TEST_MODE != BASIC) { // Store 
aggregate d_aggregate[threadIdx.x] = aggregate; } // Store time if (threadIdx.x == 0) { *d_elapsed = (start > stop) ? start - stop : stop - start; } } //--------------------------------------------------------------------- // Host utility subroutines //--------------------------------------------------------------------- /** * Initialize exclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, T initial_value, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; h_reference[i] = initial_value; T inclusive = scan_op(initial_value, h_in[i]); for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); h_reference[i] = inclusive; inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); } warp_aggregates[w] = warp_aggregate; } } /** * Initialize inclusive-scan problem (and solution) */ template < typename T, typename ScanOpT> void Initialize( GenMode gen_mode, T *h_in, T *h_reference, int logical_warp_items, ScanOpT scan_op, NullType, T warp_aggregates[NUM_WARPS]) { for (int w = 0; w < NUM_WARPS; ++w) { int base_idx = (w * logical_warp_items); int i = base_idx; InitValue(gen_mode, h_in[i], i); T warp_aggregate = h_in[i]; T inclusive = h_in[i]; h_reference[i] = inclusive; for (i = i + 1; i < base_idx + logical_warp_items; ++i) { InitValue(gen_mode, h_in[i], i); inclusive = scan_op(inclusive, h_in[i]); warp_aggregate = scan_op(warp_aggregate, h_in[i]); h_reference[i] = inclusive; } warp_aggregates[w] = warp_aggregate; } } /** * Test warp scan */ template < int LOGICAL_WARP_THREADS, TestMode TEST_MODE, typename T, typename ScanOpT, typename InitialValueT> // NullType implies inclusive-scan, otherwise inclusive scan void Test( GenMode gen_mode, ScanOpT scan_op, InitialValueT initial_value) { enum { TOTAL_ITEMS = LOGICAL_WARP_THREADS * NUM_WARPS, }; // Allocate host arrays T *h_in = new T[TOTAL_ITEMS]; T *h_reference = new T[TOTAL_ITEMS]; T *h_aggregate = new T[TOTAL_ITEMS]; // Initialize problem T aggregates[NUM_WARPS]; Initialize( gen_mode, h_in, h_reference, LOGICAL_WARP_THREADS, scan_op, initial_value, aggregates); if (g_verbose) { printf("Input: \n"); DisplayResults(h_in, TOTAL_ITEMS); printf("\n"); } for (int w = 0; w < NUM_WARPS; ++w) { for (int i = 0; i < LOGICAL_WARP_THREADS; ++i) { h_aggregate[(w * LOGICAL_WARP_THREADS) + i] = aggregates[w]; } } // Initialize/clear device arrays T *d_in = NULL; T *d_out = NULL; T *d_aggregate = NULL; clock_t *d_elapsed = NULL; CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_aggregate, sizeof(T) * TOTAL_ITEMS)); CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(clock_t))); CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * TOTAL_ITEMS, cudaMemcpyHostToDevice)); CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * (TOTAL_ITEMS + 1))); CubDebugExit(cudaMemset(d_aggregate, 0, sizeof(T) * TOTAL_ITEMS)); // Run kernel printf("Test-mode %d (%s), gen-mode %d (%s), %s warpscan, %d warp threads, %s (%d bytes) elements:\n", TEST_MODE, typeid(TEST_MODE).name(), gen_mode, typeid(gen_mode).name(), (Equals<InitialValueT, NullType>::VALUE) ? 
"Inclusive" : "Exclusive", LOGICAL_WARP_THREADS, typeid(T).name(), (int) sizeof(T)); fflush(stdout); // Run aggregate/prefix kernel WarpScanKernel<LOGICAL_WARP_THREADS, TEST_MODE><<<1, TOTAL_ITEMS>>>( d_in, d_out, d_aggregate, scan_op, initial_value, d_elapsed); printf("\tElapsed clocks: "); DisplayDeviceResults(d_elapsed, 1); CubDebugExit(cudaPeekAtLastError()); CubDebugExit(cudaDeviceSynchronize()); // Copy out and display results printf("\tScan results: "); int compare = CompareDeviceResults(h_reference, d_out, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); // Copy out and display aggregate if (TEST_MODE == AGGREGATE) { printf("\tScan aggregate: "); compare = CompareDeviceResults(h_aggregate, d_aggregate, TOTAL_ITEMS, g_verbose, g_verbose); printf("%s\n", compare ? "FAIL" : "PASS"); AssertEquals(0, compare); } // Cleanup if (h_in) delete[] h_in; if (h_reference) delete[] h_reference; if (h_aggregate) delete[] h_aggregate; if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in)); if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out)); if (d_aggregate) CubDebugExit(g_allocator.DeviceFree(d_aggregate)); if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed)); } /** * Run battery of tests for different primitive variants */ template < int LOGICAL_WARP_THREADS, typename ScanOpT, typename T> void Test( GenMode gen_mode, ScanOpT scan_op, T initial_value) { // Exclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, T()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, T()); // Exclusive (non-specialized, so we can use initial-value) Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, WrapperFunctor<ScanOpT>(scan_op), initial_value); // Inclusive Test<LOGICAL_WARP_THREADS, BASIC, T>(gen_mode, scan_op, NullType()); Test<LOGICAL_WARP_THREADS, AGGREGATE, T>(gen_mode, scan_op, NullType()); } /** * Run battery of tests for different data types and scan ops */ template <int LOGICAL_WARP_THREADS> void Test(GenMode gen_mode) { // Get device ordinal int device_ordinal; CubDebugExit(cudaGetDevice(&device_ordinal)); // Get ptx version int ptx_version; CubDebugExit(PtxVersion(ptx_version)); // primitive Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (long long) 99); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (float) 99); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), (double) 99); } // primitive (alternative scan op) Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned char) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned short) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned int) 99); Test<LOGICAL_WARP_THREADS>(gen_mode, Max(), (unsigned long long) 99); // vec-2 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uchar2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ushort2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_uint2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulong2(17, 21)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_ulonglong2(17, 21)); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), 
make_float2(17, 21)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double2(17, 21)); } // vec-4 Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_char4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_short4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_int4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_long4(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_longlong4(17, 21, 32, 85)); if (gen_mode != RANDOM) { // Only test numerically stable inputs Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_float4(17, 21, 32, 85)); if (ptx_version > 100) Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), make_double4(17, 21, 32, 85)); } // complex Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestFoo::MakeTestFoo(17, 21, 32, 85)); Test<LOGICAL_WARP_THREADS>(gen_mode, Sum(), TestBar(17, 21)); } /** * Run battery of tests for different problem generation options */ template <int LOGICAL_WARP_THREADS> void Test() { Test<LOGICAL_WARP_THREADS>(UNIFORM); Test<LOGICAL_WARP_THREADS>(INTEGER_SEED); Test<LOGICAL_WARP_THREADS>(RANDOM); } /** * Main */ int main(int argc, char** argv) { // Initialize command line CommandLineArgs args(argc, argv); g_verbose = args.CheckCmdLineFlag("v"); args.GetCmdLineArgument("repeat", g_repeat); // Print usage if (args.CheckCmdLineFlag("help")) { printf("%s " "[--device=<device-id>] " "[--repeat=<repetitions of entire test suite>]" "[--v] " "\n", argv[0]); exit(0); } // Initialize device CubDebugExit(args.DeviceInit()); #ifdef QUICK_TEST // Compile/run quick tests Test<32, AGGREGATE, int>(UNIFORM, Sum(), (int) 0); Test<32, AGGREGATE, float>(UNIFORM, Sum(), (float) 0); Test<32, AGGREGATE, long long>(UNIFORM, Sum(), (long long) 0); Test<32, AGGREGATE, double>(UNIFORM, Sum(), (double) 0); typedef KeyValuePair<int, float> T; cub::Sum sum_op; Test<32, AGGREGATE, T>(UNIFORM, ReduceBySegmentOp<cub::Sum>(sum_op), T()); #else // Compile/run thorough tests for (int i = 0; i <= g_repeat; ++i) { // Test logical warp sizes Test<32>(); Test<16>(); Test<9>(); Test<2>(); } #endif return 0; }
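Stripped of the test harness above, the cub::WarpScan calls it exercises reduce to a few lines. A minimal standalone sketch for one 32-thread logical warp; the kernel and buffer names are illustrative, not part of the test:

#include <cub/warp/warp_scan.cuh>

__global__ void warp_scan_demo(int *d_data, int *d_aggregate)
{
  typedef cub::WarpScan<int> WarpScanT; // defaults to 32 logical warp threads
  __shared__ typename WarpScanT::TempStorage temp_storage;

  int item = d_data[threadIdx.x];
  int aggregate;
  // inclusive prefix sum across the warp, also returning the warp-wide total
  WarpScanT(temp_storage).InclusiveSum(item, item, aggregate);

  d_data[threadIdx.x] = item;
  if (threadIdx.x == 0) *d_aggregate = aggregate;
}

Launched as warp_scan_demo<<<1, 32>>>(d_data, d_aggregate), each thread ends up holding the inclusive sum of items 0..threadIdx.x, and thread 0 publishes the warp-wide total, mirroring the AGGREGATE test mode above.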
9c9cd98a1317e013ca0c1f746e0d5fb43b662d6f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Brian Chrzanowski
 * Tue Nov 19, 2019 21:02
 *
 * NOTE (brian)
 *
 * - The threaded implementation can rely on memory where the CUDA model
 *   can't _really_. IE, the working storage allocated by "molt.h" won't
 *   do our graphics card any good. Because the library (even the
 *   custom routine) expects that it will have the result of a given operation,
 *   reorg or step, in the destination hunk of memory, we'll have to copy it
 *   to and from the graphics card.
 *
 *   There are some optimizations that we can do though.
 *
 *   1) We don't expect the values for the v and w weights to ever change.
 *      So, we can allocate storage for those at the beginning of the journey,
 *      and keep those allocated until the library calls "molt_custom_close".
 *
 *   2) Unfortunately, the library expects to be able to give us source
 *      and destination memory hunks as it needs to. Because of this,
 *      the custom implementation can't just keep the problem state on-device
 *      the entire time, as that would require an equivalent amount of
 *      allocations on the host device.
 *
 *      To get around this, on every custom library call (reorg and sweep), we
 *      can compare the work, dst, and src pointers to what we got "last" time.
 *      If they're different, we need to copy from host to device, to update
 *      what the device is using to perform the operation.
 *
 * TODO (brian)
 * 1. Test with copying from host to device and device to host every time
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <limits.h>
#include <math.h>

#include "../common.h"

#define MOLT_IMPLEMENTATION
#include "../molt.h"

#if !(defined(_WIN32) || defined(_WIN64))
#define __declspec(x)
#endif

struct molt_custommod_t {
  f64 *d_src, *d_work, *d_dst;
  f64 *l_src, *l_dst; // the last pointers we've seen
  struct molt_cfg_t config;
  ivec3_t *d_dim;
  f64 *d_v[6];
  f64 *d_w[6];
  f64 *h_v[6];
  f64 *h_w[6];
};

static struct molt_custommod_t g_mod;

#define GPUASSERT(v) { gpu_assert((v), (char *)__FILE__, __LINE__); }

/* gpu_assert : exits if the GPU call did not succeed */
inline void gpu_assert(hipError_t code, char *file, int line)
{
  if (code != hipSuccess) {
    fprintf(stderr, "GPUASSERT: %s:%d %s\n", file, line, hipGetErrorString(code));
    exit(1);
  }
}

/* alloc_and_copy : allocs space on device, copies 'size' bytes from host */
int alloc_and_copy(f64 **d, f64 **newh, f64 *oldh, size_t size);

/* copy_if_needed : copies from host to device if needed, returns device ptr */
f64 *copy_if_needed(f64 *device, f64 **last_host, f64 *curr_host, size_t size);

/* cuda_gfquad_m : green's function quadrature on the input vector (CUDA) */
__device__ void cuda_gfquad_m(f64 *dst, f64 *src, f64 dnu, f64 *wl, f64 *wr, s64 len, s32 M);

/* cuda_makel : applies dirichlet boundary conditions to the line in (CUDA) */
__device__ void cuda_makel(f64 *src, f64 *vl, f64 *vr, f64 minval, s64 len);

/* cuda_sweep : the cuda parallel'd sweeping function */
__global__ void cuda_sweep(f64 *dst, f64 *src, f64 *vl, f64 *vr, f64 *wl, f64 *wr, f64 minval, f64 dnu, s32 M, ivec3_t *dim);

/* cuda_genericidx : retrieves a generic index from input dimensionality */
__device__ u64 cuda_genericidx(ivec3_t ival, ivec3_t idim, cvec3_t order);

/* cuda_reorg : the cuda parallel'd transposition function */
__global__ void cuda_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t *dim, cvec3_t src_ord, cvec3_t dst_ord);

/* alloc_and_copy : allocs space on device, copies 'size' bytes from host */
int alloc_and_copy(f64 **d, f64 **newh, f64 *oldh, size_t size)
{
  hipError_t err;

  err = hipMalloc((void **)d, size);
  if (err != hipSuccess) {
    return -2;
  }

  err = hipMemcpy((void *)*d, (void *)oldh, size, hipMemcpyHostToDevice);
  if (err != hipSuccess) {
    return -1;
  }

  // save the host pointer for later use
  *newh = oldh;

  return 0;
}

/* copy_if_needed : if the host pointer changed, remembers it and re-uploads its data */
f64 *copy_if_needed(f64 *device, f64 **last_host, f64 *curr_host, size_t size)
{
  if ((*last_host) != curr_host) {
    *last_host = curr_host;
    hipMemcpy(device, curr_host, size, hipMemcpyHostToDevice);
  }
  return device;
}

/* molt_custom_open : initializes the custom module */
extern "C" __declspec(dllexport) int molt_custom_open(struct molt_custom_t *custom)
{
  u64 elements;
  struct molt_cfg_t *cfg;
  hipError_t err;
  ivec3_t pinc;
  ivec3_t points;
  int rc;

  memset(&g_mod, 0, sizeof(struct molt_custommod_t));

  // snag a copy of the config structure we'll use
  // throughout the module's lifetime
  memcpy(&g_mod.config, custom->cfg, sizeof(struct molt_cfg_t));
  cfg = &g_mod.config;

  molt_cfg_parampull_xyz(cfg, pinc, MOLT_PARAM_PINC);
  molt_cfg_parampull_xyz(cfg, points, MOLT_PARAM_POINTS);

  elements = pinc[0] * (u64)pinc[1] * pinc[2];

  rc = alloc_and_copy(&g_mod.d_v[0], &g_mod.h_v[0], custom->vlx, points[0] * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_v[1], &g_mod.h_v[1], custom->vrx, points[0] * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_v[2], &g_mod.h_v[2], custom->vly, points[1] * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_v[3], &g_mod.h_v[3], custom->vry, points[1] * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_v[4], &g_mod.h_v[4], custom->vlz, points[2] * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_v[5], &g_mod.h_v[5], custom->vrz, points[2] * sizeof(f64));
  if (rc < 0) { return -1; }

  rc = alloc_and_copy(&g_mod.d_w[0], &g_mod.h_w[0], custom->wlx, cfg->x_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_w[1], &g_mod.h_w[1], custom->wrx, cfg->x_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_w[2], &g_mod.h_w[2], custom->wly, cfg->y_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_w[3], &g_mod.h_w[3], custom->wry, cfg->y_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_w[4], &g_mod.h_w[4], custom->wlz, cfg->z_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }
  rc = alloc_and_copy(&g_mod.d_w[5], &g_mod.h_w[5], custom->wrz, cfg->z_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
  if (rc < 0) { return -1; }

  // because we don't need to copy these from the host, plain device allocations suffice
  err = hipMalloc(&g_mod.d_src, elements * sizeof(f64));
  if (err != hipSuccess) { return -1; }
  err = hipMalloc(&g_mod.d_dst, elements * sizeof(f64));
  if (err != hipSuccess) { return -1; }
  err = hipMalloc(&g_mod.d_work, elements * sizeof(f64));
  if (err != hipSuccess) { return -1; }

  // NOTE (Brian) somehow, when I was testing on WIN32 this completely evaded me, but as far
  // as I can tell, you have to set up the CUDA system in a very special mode to seamlessly
  // pass host pointers and things to the device.
  //
  // Doing so would completely pollute the 'core' molt library and make it depend on CUDA.
  // So, we'll just copy the ~12 bytes every time, and be done with it.
  err = hipMalloc(&g_mod.d_dim, sizeof *g_mod.d_dim); // sizeof(ivec3_t), not sizeof the pointer
  if (err != hipSuccess) { return -1; }

  return 0;
}

/* molt_custom_close : cleans up the custom module */
extern "C" __declspec(dllexport) int molt_custom_close(struct molt_custom_t *custom)
{
  int i;

  for (i = 0; i < 6; i++) {
    hipFree(g_mod.d_v[i]);
    hipFree(g_mod.d_w[i]);
  }

  hipFree(g_mod.d_src);
  hipFree(g_mod.d_dst);
  hipFree(g_mod.d_work);
  hipFree(g_mod.d_dim);

  memset(&g_mod, 0, sizeof(g_mod));

  return 0;
}

/* cuda_vect_mul : computes the dot product (sum of element-wise products) of two vectors */
__device__ f64 cuda_vect_mul(f64 *veca, f64 *vecb, s32 veclen)
{
  f64 val;
  s32 i;

  for (val = 0, i = 0; i < veclen; i++) {
    val += veca[i] * vecb[i];
  }

  return val;
}

/* cuda_gfquad_m : green's function quadrature on the input vector (CUDA) */
__device__ void cuda_gfquad_m(f64 *dst, f64 *src, f64 dnu, f64 *wl, f64 *wr, s64 len, s32 M)
{
  /* out and in's length is defined by hunklen */
  f64 IL, IR;
  s32 iL, iR, iC, M2, N;
  s32 i;

  IL = 0;
  IR = 0;
  M2 = M / 2;
  N = len - 1;
  M++;

  iL = 0;
  iC = -M2;
  iR = len - M;

  /* left sweep */
  for (i = 0; i < M2; i++) {
    IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[iL], M);
    dst[i + 1] = dst[i + 1] + IL;
  }

  for (; i < N - M2; i++) {
    IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[i + 1 + iC], M);
    dst[i + 1] = dst[i + 1] + IL;
  }

  for (; i < N; i++) {
    IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[iR], M);
    dst[i + 1] = dst[i + 1] + IL;
  }

  /* right sweep */
  for (i = N - 1; i > N - 1 - M2; i--) {
    IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[iR], M);
    dst[i] = dst[i] + IR;
  }

  for (; i >= M2; i--) {
    IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[i + 1 + iC], M);
    dst[i] = dst[i] + IR;
  }

  for (; i >= 0; i--) {
    IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[iL], M);
    dst[i] = dst[i] + IR;
  }

  // I = I / 2
  for (i = 0; i < len; i++)
    dst[i] /= 2;
}

/* cuda_makel : applies dirichlet boundary conditions to the line in (CUDA) */
__device__ void cuda_makel(f64 *src, f64 *vl, f64 *vr, f64 minval, s64 len)
{
  /*
   * cuda_makel applies dirichlet boundary conditions to the line in place
   *
   * Executes this:
   *   w = w + ((wa - w(1)) * (vL - dN * vR) + (wb - w(end)) * (vR - dN * vL))
   *         / (1 - dN ^ 2)
   *
   * NOTE(s)
   * wa and wb are left here as const scalars for future expansion of boundary
   * conditions.
   */

  f64 wa_use, wb_use, wc_use;
  f64 val;
  s64 i;

  const f64 wa = 0;
  const f64 wb = 0;

  // * wa_use - w(1)
  // * wb_use - w(end)
  // * wc_use - 1 - dN ^ 2
  wa_use = wa - src[0];
  wb_use = wb - src[len - 1];
  wc_use = 1 - pow(minval, 2);

  for (i = 0; i < len; i++) {
    // parenthesized to follow the formula above
    val  = wa_use * (vl[i] - minval * vr[i]);
    val += wb_use * (vr[i] - minval * vl[i]);
    val /= wc_use;
    src[i] += val;
  }
}

/* cuda_sweep : the cuda parallel'd sweeping function */
__global__ void cuda_sweep(f64 *dst, f64 *src, f64 *vl, f64 *vr, f64 *wl, f64 *wr, f64 minval, f64 dnu, s32 M, ivec3_t *dim)
{
  /*
   * NOTE (brian)
   * This function, while idiomatic CUDA, might seem a bit weird. Because
   * launching the kernel in higher dimensions is, honestly, difficult to
   * think about, I chose to solve the slightly easier problem, that is,
   * launch the kernel in "a single dimension", with Y by Z (dim[1] * dim[2])
   * threads, then use our single dimension launch parameters to determine
   * how far we're into the volume, using the IDX3D macro.
   */

  u64 y, z, i;

  // first, get our thread number (thread 0, thread 1, thread 500, etc)
  i = threadIdx.x + blockDim.x * blockIdx.x;

  // use our volume dimensionality to determine the REAL y and z values from that
  // this assumes that we think about the problem in a "2D" sense
  y = i % (*dim)[1];
  z = i / (*dim)[1];

  i = IDX3D(0, y, z, (*dim)[1], (*dim)[2]);

  // don't perform the computation if we're out of range
  if ((u64)(*dim)[0] * (*dim)[1] * (*dim)[2] <= i) {
    return;
  }

  // now that we have this thread's starting point, perform the algorithm
  // on this thread, for this row in x
  cuda_gfquad_m(dst + i, src + i, dnu, wl, wr, (*dim)[0], M);
  cuda_makel(dst + i, vl, vr, minval, (*dim)[0]);
}

/* molt_custom_sweep : performs a threaded sweep across the mesh in the dimension specified */
extern "C" __declspec(dllexport) void molt_custom_sweep(f64 *dst, f64 *src, f64 *work, ivec3_t dim, cvec3_t ord, pdvec6_t params, dvec3_t dnu, s32 M)
{
  /*
   * NOTE (brian)
   * Because of the interface being defined how it is, we first have to
   * create our mapping from host pointers to device pointers.
   *
   * We don't care about what the work pointer is because the device has to
   * have its own working memory anyways, AND we're just going to memset it
   * to 0 to begin with anyways.
   *
   * TODO (brian)
   * - it might be worth making these functions return ints and checking for
   *   errors. Ideally, these won't error, but I suppose you wouldn't
   *   know until you checked..
   */

  f64 *d_src, *d_work, *d_dst;
  f64 *d_vl, *d_vr, *d_wl, *d_wr;
  f64 *h_vl, *h_vr, *h_wl, *h_wr;
  f64 usednu, minval;
  u64 elements, i;
  size_t bytes;

  elements = (u64)dim[0] * dim[1] * dim[2];
  bytes = elements * sizeof(f64);

  // copy the bytes from host to device if the pointers have changed since last time
  d_src = copy_if_needed(g_mod.d_src, &g_mod.l_src, src, bytes);
  d_dst = copy_if_needed(g_mod.d_dst, &g_mod.l_dst, dst, bytes);
  d_work = g_mod.d_work;

  // find our v and w weights on the device
  h_vl = params[0];
  h_vr = params[1];
  h_wl = params[2];
  h_wr = params[3];

  d_vl = NULL;
  d_vr = NULL;
  d_wl = NULL;
  d_wr = NULL;

  for (i = 0; i < 6; i++) {
    if (!d_vl && g_mod.h_v[i] == h_vl) { d_vl = g_mod.d_v[i]; }
    if (!d_vr && g_mod.h_v[i] == h_vr) { d_vr = g_mod.d_v[i]; }
    if (!d_wl && g_mod.h_w[i] == h_wl) { d_wl = g_mod.d_w[i]; }
    if (!d_wr && g_mod.h_w[i] == h_wr) { d_wr = g_mod.d_w[i]; }
  }

  // find the minval (dN in Matlab)
  // NOTE (brian) this should have the same dimensionality all the time
  for (i = 0, minval = DBL_MAX; i < dim[0]; i++) {
    if (h_vl[i] < minval)
      minval = h_vl[i];
  }

  // determine the correct dnu to use
  switch (ord[0]) {
  case 'x': usednu = dnu[0]; break;
  case 'y': usednu = dnu[1]; break;
  case 'z': usednu = dnu[2]; break;
  default:
    // illegal parameter; fall back to x so usednu is never read uninitialized
    usednu = dnu[0];
    break;
  }

  hipMemset(d_work, 0, bytes);

  u64 threads, blocks, iterations;
  iterations = dim[1] * dim[2];
  threads = 256;
  blocks = (iterations + threads - 1) / threads;

  // copy the 12-byte ivec3_t itself, not the size of the pointer
  hipMemcpy(g_mod.d_dim, dim, sizeof *g_mod.d_dim, hipMemcpyHostToDevice);

  // launch the kernel with one thread per (y, z) pair, sweeping the volume a whole
  // x row at a time, then wait for the sync
  hipLaunchKernelGGL(cuda_sweep, dim3(blocks), dim3(threads), 0, 0,
      d_dst, d_src, d_vl, d_vr, d_wl, d_wr, minval, usednu, M, g_mod.d_dim);
  GPUASSERT(hipDeviceSynchronize());

  // copy from device to host, so the library's expectations are met
  hipMemcpy((void *)dst, (void *)d_dst, bytes, hipMemcpyDeviceToHost);
}

/* cuda_genericidx : retrieves a generic index from input dimensionality */
__device__ u64 cuda_genericidx(ivec3_t ival, ivec3_t idim, cvec3_t order)
{
  /*
   * NOTE (brian)
   * This is just a copy, for the CUDA device, of the library function
   * of a similar name.
   */

  ivec3_t lval, ldim;
  s32 i;

  for (i = 0; i < 3; i++) {
    switch (order[i]) {
    case 'x':
      lval[i] = ival[0];
      ldim[i] = idim[0];
      break;
    case 'y':
      lval[i] = ival[1];
      ldim[i] = idim[1];
      break;
    case 'z':
      lval[i] = ival[2];
      ldim[i] = idim[2];
      break;
    }
  }

  return IDX3D(lval[0], lval[1], lval[2], ldim[1], ldim[2]);
}

/* cuda_reorg : the cuda parallel'd transposition function */
__global__ void cuda_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t *dim, cvec3_t src_ord, cvec3_t dst_ord)
{
  u64 src_i, dst_i;
  ivec3_t curr;

  curr[0] = threadIdx.x + blockDim.x * blockIdx.x;
  curr[1] = threadIdx.y + blockDim.y * blockIdx.y;
  curr[2] = threadIdx.z + blockDim.z * blockIdx.z;

  // don't run off the end of the volume
  if (curr[0] >= (*dim)[0] || curr[1] >= (*dim)[1] || curr[2] >= (*dim)[2]) {
    return;
  }

  src_i = cuda_genericidx(curr, (*dim), src_ord);
  dst_i = cuda_genericidx(curr, (*dim), dst_ord); // was src_ord, which made the reorg a plain copy

  dst[dst_i] = src[src_i];
}

/* molt_custom_reorg : reorganizes a 3d mesh from src to dst */
extern "C" __declspec(dllexport) void molt_custom_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t dim, cvec3_t src_ord, cvec3_t dst_ord)
{
  f64 *d_src, *d_work, *d_dst;
  u64 elements;
  dim3 grid, block;
  size_t bytes;

  elements = (u64)dim[0] * dim[1] * dim[2];
  bytes = elements * sizeof(f64);

  d_src = copy_if_needed(g_mod.d_src, &g_mod.l_src, src, bytes);
  d_dst = g_mod.d_dst; // was g_mod.d_src, which aliased the input and corrupted the transpose
  d_work = g_mod.d_work;

  hipMemset(d_work, 0, bytes);
  hipMemcpy(g_mod.d_dim, dim, sizeof *g_mod.d_dim, hipMemcpyHostToDevice);

  // Unlike the cuda sweep, where we launch the kernel in a
  // "single dimension", we launch this one in 3 dimensions.
  // 16x16x16 would be 4096 threads per block, over the 1024-thread limit,
  // so use 8x8x8 and round the grid up to cover the whole volume.
  block = dim3(8, 8, 8);
  grid = dim3((dim[0] + block.x - 1) / block.x,
      (dim[1] + block.y - 1) / block.y,
      (dim[2] + block.z - 1) / block.z);

  hipLaunchKernelGGL(cuda_reorg, grid, block, 0, 0,
      d_dst, d_src, d_work, g_mod.d_dim, src_ord, dst_ord);
  GPUASSERT(hipDeviceSynchronize());

  hipMemcpy((void *)dst, (void *)d_dst, bytes, hipMemcpyDeviceToHost);
}
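The pointer-caching trick the NOTE at the top of this file describes is independent of molt itself. A minimal sketch of the pattern in isolation (illustrative names, not library code):

// Re-upload a host buffer only when the library hands us a different pointer
// than last time; otherwise skip the PCIe transfer entirely.
#include <hip/hip_runtime.h>
#include <stddef.h>

static double *g_last_host; // last host pointer whose contents live on the device

double *upload_if_changed(double *d_buf, double *h_buf, size_t bytes)
{
  if (g_last_host != h_buf) { // a new hunk: the device copy is stale
    g_last_host = h_buf;
    hipMemcpy(d_buf, h_buf, bytes, hipMemcpyHostToDevice);
  }
  return d_buf;
}

Pointer identity stands in for content identity here, which holds in this module only because every sweep and reorg copies its result back into the host buffer before returning, keeping host and device views in step.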
9c9cd98a1317e013ca0c1f746e0d5fb43b662d6f.cu
/*
 * Brian Chrzanowski
 * Tue Nov 19, 2019 21:02
 *
 * NOTE (brian)
 *
 * - The threaded implementation can rely on memory where the CUDA model
 *   can't _really_. I.e., the working storage allocated by "molt.h" won't
 *   do our graphics card any good. Because the library (even the
 *   custom routine) expects that it will have the result of a given operation,
 *   reorg or step, in the destination hunk of memory, we'll have to copy it
 *   to and from the graphics card.
 *
 *   There are some optimizations that we can do though.
 *
 *   1) We don't ever expect the values for the v and w weights to change.
 *      So, we can allocate storage for those at the beginning of the journey,
 *      and keep those allocated until the library calls "molt_custom_close".
 *
 *   2) Unfortunately, the library expects to be able to give us source
 *      and destination memory hunks as it needs to. Because of this,
 *      the custom implementation can't just keep the problem state on-device
 *      the entire time, as that would require an equivalent amount of
 *      allocations on the host side.
 *
 *      To get around this, on every custom library call (reorg and sweep), we
 *      can compare the work, dst, and src pointers to what we got "last" time.
 *      If they're different, we need to copy from host to device, to update
 *      what the device is using to perform the operation.
 *
 * TODO (brian)
 * 1. Test with copying from host to device and device to host every time
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <float.h>
#include <limits.h>
#include <math.h>

#include "../common.h"

#define MOLT_IMPLEMENTATION
#include "../molt.h"

#if !(defined(_WIN32) || defined(_WIN64))
#define __declspec(x)
#endif

struct molt_custommod_t {
	f64 *d_src, *d_work, *d_dst;
	f64 *l_src, *l_dst; // the last pointers we've seen
	struct molt_cfg_t config;
	ivec3_t *d_dim;
	f64 *d_v[6];
	f64 *d_w[6];
	f64 *h_v[6];
	f64 *h_w[6];
};

static struct molt_custommod_t g_mod;

#define GPUASSERT(v) { gpu_assert((v), (char *)__FILE__, __LINE__); }

/* gpu_assert : exits if the CUDA call did not succeed */
inline void gpu_assert(cudaError_t code, char *file, int line)
{
	if (code != cudaSuccess) {
		fprintf(stderr, "GPUASSERT: %s:%d %s\n", file, line, cudaGetErrorString(code));
		exit(1);
	}
}

/* alloc_and_copy : allocs space on device, copies 'size' bytes from host */
int alloc_and_copy(f64 **d, f64 **newh, f64 *oldh, size_t size);

/* copy_if_needed : copies from host to device if needed, returns device ptr */
f64 *copy_if_needed(f64 *device, f64 **last_host, f64 *curr_host, size_t size);

/* cuda_gfquad_m : green's function quadrature on the input vector (CUDA) */
__device__ void cuda_gfquad_m(f64 *dst, f64 *src, f64 dnu, f64 *wl, f64 *wr, s64 len, s32 M);

/* cuda_makel : applies dirichlet boundary conditions to the line in (CUDA) */
__device__ void cuda_makel(f64 *src, f64 *vl, f64 *vr, f64 minval, s64 len);

/* cuda_sweep : the cuda parallel'd sweeping function */
__global__ void cuda_sweep(f64 *dst, f64 *src, f64 *vl, f64 *vr, f64 *wl, f64 *wr, f64 minval, f64 dnu, s32 M, ivec3_t *dim);

/* cuda_genericidx : retrieves a generic index from input dimensionality */
__device__ u64 cuda_genericidx(ivec3_t ival, ivec3_t idim, cvec3_t order);

/* cuda_reorg : the cuda parallel'd transposition function */
__global__ void cuda_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t *dim, cvec3_t src_ord, cvec3_t dst_ord);

/* alloc_and_copy : allocs space on device, copies 'size' bytes from host */
int alloc_and_copy(f64 **d, f64 **newh, f64 *oldh, size_t size)
{
	cudaError_t err;

	err = cudaMalloc((void **)d, size);
	if (err != cudaSuccess) {
		return -2;
	}

	err = cudaMemcpy((void *)*d, (void *)oldh, size, cudaMemcpyHostToDevice);
	if (err != cudaSuccess) {
		return -1;
	}

	// save the host pointer for later use
	*newh = oldh;

	return 0;
}

/* copy_if_needed : if needed, updates host pointer and data */
f64 *copy_if_needed(f64 *device, f64 **last_host, f64 *curr_host, size_t size)
{
	if ((*last_host) != curr_host) {
		*last_host = curr_host;
		cudaMemcpy(device, curr_host, size, cudaMemcpyHostToDevice);
	}

	return device;
}

/* molt_custom_open : initializes the custom module */
extern "C" __declspec(dllexport) int molt_custom_open(struct molt_custom_t *custom)
{
	u64 elements;
	struct molt_cfg_t *cfg;
	cudaError_t err;
	ivec3_t pinc;
	ivec3_t points;
	int rc;

	memset(&g_mod, 0, sizeof(struct molt_custommod_t));

	// snag a copy of the config structure we'll use
	// throughout the module's lifetime
	memcpy(&g_mod.config, custom->cfg, sizeof(struct molt_cfg_t));
	cfg = &g_mod.config;

	molt_cfg_parampull_xyz(cfg, pinc, MOLT_PARAM_PINC);
	molt_cfg_parampull_xyz(cfg, points, MOLT_PARAM_POINTS);

	elements = pinc[0] * (u64)pinc[1] * pinc[2];

	rc = alloc_and_copy(&g_mod.d_v[0], &g_mod.h_v[0], custom->vlx, points[0] * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_v[1], &g_mod.h_v[1], custom->vrx, points[0] * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_v[2], &g_mod.h_v[2], custom->vly, points[1] * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_v[3], &g_mod.h_v[3], custom->vry, points[1] * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_v[4], &g_mod.h_v[4], custom->vlz, points[2] * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_v[5], &g_mod.h_v[5], custom->vrz, points[2] * sizeof(f64));
	if (rc < 0) { return -1; }

	rc = alloc_and_copy(&g_mod.d_w[0], &g_mod.h_w[0], custom->wlx, cfg->x_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_w[1], &g_mod.h_w[1], custom->wrx, cfg->x_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_w[2], &g_mod.h_w[2], custom->wly, cfg->y_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_w[3], &g_mod.h_w[3], custom->wry, cfg->y_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_w[4], &g_mod.h_w[4], custom->wlz, cfg->z_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }
	rc = alloc_and_copy(&g_mod.d_w[5], &g_mod.h_w[5], custom->wrz, cfg->z_params[MOLT_PARAM_POINTS] * (cfg->spaceacc + 1) * sizeof(f64));
	if (rc < 0) { return -1; }

	// because we don't need to copy from the host, we'll just use cuda funcs
	err = cudaMalloc(&g_mod.d_src, elements * sizeof(f64));
	if (err != cudaSuccess) { return -1; }
	err = cudaMalloc(&g_mod.d_dst, elements * sizeof(f64));
	if (err != cudaSuccess) { return -1; }
	err = cudaMalloc(&g_mod.d_work, elements * sizeof(f64));
	if (err != cudaSuccess) { return -1; }

	// NOTE (Brian) somehow, when I was testing on WIN32 this completely evaded me, but as far
	// as I can tell, you have to set up the CUDA system in a very special mode to seamlessly
	// pass host pointers and things to the device.
	//
	// Doing so would completely pollute the 'core' molt library and have a dependence on CUDA.
	// So, we'll just copy the ~12 bytes every time, and be done with it.
	err = cudaMalloc(&g_mod.d_dim, sizeof(ivec3_t));
	if (err != cudaSuccess) { return -1; }

	return 0;
}

/* molt_custom_close : cleans up the custom module */
extern "C" __declspec(dllexport) int molt_custom_close(struct molt_custom_t *custom)
{
	int i;

	for (i = 0; i < 6; i++) {
		cudaFree(g_mod.d_v[i]);
		cudaFree(g_mod.d_w[i]);
	}

	cudaFree(g_mod.d_src);
	cudaFree(g_mod.d_dst);
	cudaFree(g_mod.d_work);
	cudaFree(g_mod.d_dim);

	memset(&g_mod, 0, sizeof(g_mod));

	return 0;
}

/* cuda_vect_mul : computes the dot product of two vectors */
__device__ f64 cuda_vect_mul(f64 *veca, f64 *vecb, s32 veclen)
{
	f64 val;
	s32 i;

	for (val = 0, i = 0; i < veclen; i++) {
		val += veca[i] * vecb[i];
	}

	return val;
}

/* cuda_gfquad_m : green's function quadrature on the input vector (CUDA) */
__device__ void cuda_gfquad_m(f64 *dst, f64 *src, f64 dnu, f64 *wl, f64 *wr, s64 len, s32 M)
{
	/* out and in's length is defined by hunklen */
	f64 IL, IR;
	s32 iL, iR, iC, M2, N;
	s32 i;

	IL = 0;
	IR = 0;
	M2 = M / 2;
	N = len - 1;
	M++;

	iL = 0;
	iC = -M2;
	iR = len - M;

	/* left sweep */
	for (i = 0; i < M2; i++) {
		IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[iL], M);
		dst[i + 1] = dst[i + 1] + IL;
	}

	for (; i < N - M2; i++) {
		IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[i + 1 + iC], M);
		dst[i + 1] = dst[i + 1] + IL;
	}

	for (; i < N; i++) {
		IL = dnu * IL + cuda_vect_mul(&wl[i * M], &src[iR], M);
		dst[i + 1] = dst[i + 1] + IL;
	}

	/* right sweep */
	for (i = N - 1; i > N - 1 - M2; i--) {
		IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[iR], M);
		dst[i] = dst[i] + IR;
	}

	for (; i >= M2; i--) {
		IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[i + 1 + iC], M);
		dst[i] = dst[i] + IR;
	}

	for (; i >= 0; i--) {
		IR = dnu * IR + cuda_vect_mul(&wr[i * M], &src[iL], M);
		dst[i] = dst[i] + IR;
	}

	// I = I / 2
	for (i = 0; i < len; i++)
		dst[i] /= 2;
}

/* cuda_makel : applies dirichlet boundary conditions to the line in (CUDA) */
__device__ void cuda_makel(f64 *src, f64 *vl, f64 *vr, f64 minval, s64 len)
{
	/*
	 * cuda_makel applies dirichlet boundary conditions to the line in place
	 *
	 * Executes this:
	 *   w = w + ((wa - w(1)) * (vL - dN * vR) + (wb - w(end)) * (vR - dN * vL))
	 *       / (1 - dN ^ 2)
	 *
	 * NOTE(s)
	 * wa and wb are left here as const scalars for future expansion of boundary
	 * conditions.
	 */

	f64 wa_use, wb_use, wc_use;
	f64 val;
	s64 i;

	const f64 wa = 0;
	const f64 wb = 0;

	// * wa_use - w(1)
	// * wb_use - w(end)
	// * wc_use - 1 - dN ^ 2
	wa_use = wa - src[0];
	wb_use = wb - src[len - 1];
	wc_use = 1 - pow(minval, 2);

	for (i = 0; i < len; i++) {
		val  = wa_use * vl[i] - minval * vr[i];
		val += wb_use * vr[i] - minval * vl[i];
		val /= wc_use;
		src[i] += val;
	}
}

/* cuda_sweep : the cuda parallel'd sweeping function */
__global__ void cuda_sweep(f64 *dst, f64 *src, f64 *vl, f64 *vr, f64 *wl, f64 *wr, f64 minval, f64 dnu, s32 M, ivec3_t *dim)
{
	/*
	 * NOTE (brian)
	 * This function, while idiomatic CUDA, might seem a bit weird. Because
	 * launching the kernel in higher dimensions is, honestly, difficult to
	 * think about, I chose to solve the slightly easier problem, that is,
	 * launch the kernel in "a single dimension", with Y by Z (dim[1] * dim[2])
	 * threads, then use our single dimension launch parameters to determine
	 * how far we're into the volume, using the IDX3D macro.
	 */

	u64 y, z, i;

	// first, get our thread number (thread 0, thread 1, thread 500, etc)
	i = threadIdx.x + blockDim.x * blockIdx.x;

	// use our volume dimensionality to determine the REAL y and z values from
	// that; this assumes that we think about the problem in a "2D" sense
	y = i % (*dim)[1];
	z = i / (*dim)[1];

	// don't perform the computation if we're out of range
	if (z >= (u64)(*dim)[2]) {
		return;
	}

	i = IDX3D(0, y, z, (*dim)[1], (*dim)[2]);

	// now that we have this thread's starting point, perform the algorithm
	// on this thread, for this row in x
	cuda_gfquad_m(dst + i, src + i, dnu, wl, wr, (*dim)[0], M);
	cuda_makel(dst + i, vl, vr, minval, (*dim)[0]);
}

/* molt_custom_sweep : performs a threaded sweep across the mesh in the dimension specified */
extern "C" __declspec(dllexport) void molt_custom_sweep(f64 *dst, f64 *src, f64 *work, ivec3_t dim, cvec3_t ord, pdvec6_t params, dvec3_t dnu, s32 M)
{
	/*
	 * NOTE (brian)
	 * Because of the interface being defined how it is, we first have to
	 * create our mapping from host pointers to device pointers.
	 *
	 * We don't care about what the work pointer is because the device has to
	 * have its own working memory anyways, AND we're just going to memset it
	 * to 0 to begin with anyways.
	 *
	 * TODO (brian)
	 * - it might be worth making these functions return ints and checking for
	 *   errors. Ideally, these won't error, but I suppose you wouldn't
	 *   know until you checked.
	 */

	f64 *d_src, *d_work, *d_dst;
	f64 *d_vl, *d_vr, *d_wl, *d_wr;
	f64 *h_vl, *h_vr, *h_wl, *h_wr;
	f64 usednu, minval;
	u64 elements, i;
	size_t bytes;

	elements = (u64)dim[0] * dim[1] * dim[2];
	bytes = elements * sizeof(f64);

	// copy the bytes from host to device if the pointers have changed
	// since last time
	d_src = copy_if_needed(g_mod.d_src, &g_mod.l_src, src, bytes);
	d_dst = copy_if_needed(g_mod.d_dst, &g_mod.l_dst, dst, bytes);
	d_work = g_mod.d_work;

	// find our v and w weights on the device
	h_vl = params[0];
	h_vr = params[1];
	h_wl = params[2];
	h_wr = params[3];

	d_vl = NULL;
	d_vr = NULL;
	d_wl = NULL;
	d_wr = NULL;

	for (i = 0; i < 6; i++) {
		if (!d_vl && g_mod.h_v[i] == h_vl) { d_vl = g_mod.d_v[i]; }
		if (!d_vr && g_mod.h_v[i] == h_vr) { d_vr = g_mod.d_v[i]; }
		if (!d_wl && g_mod.h_w[i] == h_wl) { d_wl = g_mod.d_w[i]; }
		if (!d_wr && g_mod.h_w[i] == h_wr) { d_wr = g_mod.d_w[i]; }
	}

	// find the minval (dN in Matlab)
	// NOTE (brian) this should have the same dimensionality all the time
	for (i = 0, minval = DBL_MAX; i < dim[0]; i++) {
		if (h_vl[i] < minval)
			minval = h_vl[i];
	}

	// determine the correct dnu to use
	switch (ord[0]) {
	case 'x':
		usednu = dnu[0];
		break;
	case 'y':
		usednu = dnu[1];
		break;
	case 'z':
		usednu = dnu[2];
		break;
	default:
		// assert here? it's an illegal parameter
		usednu = dnu[0];
		break;
	}

	cudaMemset(d_work, 0, bytes);

	u64 threads, blocks, iterations;

	iterations = dim[1] * dim[2];
	threads = 256;
	blocks = (iterations + threads - 1) / threads;

	cudaMemcpy(g_mod.d_dim, dim, sizeof(ivec3_t), cudaMemcpyHostToDevice);

	// launch our kernel with dimensionality Y by Z, to sweep through the
	// volume a plane at a time, then wait for the sync
	cuda_sweep<<<blocks, threads>>>(d_dst, d_src, d_vl, d_vr, d_wl, d_wr, minval, usednu, M, g_mod.d_dim);
	GPUASSERT(cudaDeviceSynchronize());

	// copy from device to host, so the library's expectations are met
	cudaMemcpy((void *)dst, (void *)d_dst, bytes, cudaMemcpyDeviceToHost);
}

/* cuda_genericidx : retrieves a generic index from input dimensionality */
__device__ u64 cuda_genericidx(ivec3_t ival, ivec3_t idim, cvec3_t order)
{
	/*
	 * NOTE (brian)
	 * This is just a copy, for the CUDA device, of the library function
	 * of a similar name.
	 */

	ivec3_t lval, ldim;
	s32 i;

	for (i = 0; i < 3; i++) {
		switch (order[i]) {
		case 'x':
			lval[i] = ival[0];
			ldim[i] = idim[0];
			break;
		case 'y':
			lval[i] = ival[1];
			ldim[i] = idim[1];
			break;
		case 'z':
			lval[i] = ival[2];
			ldim[i] = idim[2];
			break;
		}
	}

	return IDX3D(lval[0], lval[1], lval[2], ldim[1], ldim[2]);
}

/* cuda_reorg : the cuda parallel'd transposition function */
__global__ void cuda_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t *dim, cvec3_t src_ord, cvec3_t dst_ord)
{
	u64 src_i, dst_i;
	ivec3_t curr;

	curr[0] = threadIdx.x + blockDim.x * blockIdx.x;
	curr[1] = threadIdx.y + blockDim.y * blockIdx.y;
	curr[2] = threadIdx.z + blockDim.z * blockIdx.z;

	// don't read or write out of bounds on the rounded-up grid
	if (curr[0] >= (*dim)[0] || curr[1] >= (*dim)[1] || curr[2] >= (*dim)[2]) {
		return;
	}

	src_i = cuda_genericidx(curr, (*dim), src_ord);
	dst_i = cuda_genericidx(curr, (*dim), dst_ord);

	dst[dst_i] = src[src_i];
}

/* molt_custom_reorg : reorganizes a 3d mesh from src to dst */
extern "C" __declspec(dllexport) void molt_custom_reorg(f64 *dst, f64 *src, f64 *work, ivec3_t dim, cvec3_t src_ord, cvec3_t dst_ord)
{
	f64 *d_src, *d_work, *d_dst;
	u64 elements;
	dim3 grid, block;
	size_t bytes;

	elements = (u64)dim[0] * dim[1] * dim[2];
	bytes = elements * sizeof(f64);

	d_src = copy_if_needed(g_mod.d_src, &g_mod.l_src, src, bytes);
	d_dst = g_mod.d_dst;
	d_work = g_mod.d_work;

	cudaMemset(d_work, 0, bytes);
	cudaMemcpy(g_mod.d_dim, dim, sizeof(ivec3_t), cudaMemcpyHostToDevice);

	// Unlike the cuda sweep, where we launch the kernel in a
	// "single dimension", we launch this one in 3 dimensions.
	block = dim3(8, 8, 8);
	grid = dim3((dim[0] + block.x - 1) / block.x,
	            (dim[1] + block.y - 1) / block.y,
	            (dim[2] + block.z - 1) / block.z);

	cuda_reorg<<<grid, block>>>(d_dst, d_src, d_work, g_mod.d_dim, src_ord, dst_ord);
	GPUASSERT(cudaDeviceSynchronize());

	cudaMemcpy((void *)dst, (void *)d_dst, bytes, cudaMemcpyDeviceToHost);
}
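The cuda_sweep NOTE above describes flattening a Y-by-Z launch into one dimension and recovering (y, z) with a modulus and a division. The following is a minimal, self-contained sketch of that same pattern; row_kernel, the dimX/dimY/dimZ values, and the per-row work are illustrative placeholders, not part of molt.

// Sketch of the 1-D launch pattern cuda_sweep uses: Y*Z threads, each
// recovering its (y, z) pair and operating on one full row in x.
#include <cstdio>

__global__ void row_kernel(double *mesh, int dimX, int dimY, int dimZ)
{
	unsigned long long t = threadIdx.x + (unsigned long long)blockDim.x * blockIdx.x;
	unsigned long long y = t % dimY; // column index within the (y, z) plane
	unsigned long long z = t / dimY; // plane index

	if (z >= (unsigned long long)dimZ)
		return; // guard the rounded-up grid

	double *row = mesh + (z * dimY + y) * dimX; // start of this thread's x-row
	for (int x = 0; x < dimX; x++)
		row[x] += 1.0; // stand-in for the per-row sweep work
}

int main(void)
{
	const int dimX = 8, dimY = 4, dimZ = 3;
	double *d_mesh;
	cudaMalloc(&d_mesh, sizeof(double) * dimX * dimY * dimZ);
	cudaMemset(d_mesh, 0, sizeof(double) * dimX * dimY * dimZ);

	int iterations = dimY * dimZ;
	int threads = 256;
	int blocks = (iterations + threads - 1) / threads; // same ceil-div as molt_custom_sweep

	row_kernel<<<blocks, threads>>>(d_mesh, dimX, dimY, dimZ);
	cudaDeviceSynchronize();
	cudaFree(d_mesh);
	return 0;
}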
4f56008905242e7a3673f2e7e1cb7c243905656a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_ba1;
int xdim0_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba1;
int ydim0_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba1;
int xdim1_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba1;
int ydim1_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba1;
int xdim2_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba1;
int ydim2_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba1;
int xdim3_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba1;
int ydim3_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba1;
int xdim4_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba1;
int ydim4_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba1;
int xdim5_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba1;
int ydim5_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba1;
int xdim6_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba1;
int ydim6_update_halo_kernel1_ba1_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_update_halo_kernel1_ba1 * (y) +                                   \
   xdim0_update_halo_kernel1_ba1 * ydim0_update_halo_kernel1_ba1 * (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_update_halo_kernel1_ba1 * (y) +                                   \
   xdim1_update_halo_kernel1_ba1 * ydim1_update_halo_kernel1_ba1 * (z))
#define OPS_ACC2(x, y, z)                                                      \
  (x + xdim2_update_halo_kernel1_ba1 * (y) +                                   \
   xdim2_update_halo_kernel1_ba1 * ydim2_update_halo_kernel1_ba1 * (z))
#define OPS_ACC3(x, y, z)                                                      \
  (x + xdim3_update_halo_kernel1_ba1 * (y) +                                   \
   xdim3_update_halo_kernel1_ba1 * ydim3_update_halo_kernel1_ba1 * (z))
#define OPS_ACC4(x, y, z)                                                      \
  (x + xdim4_update_halo_kernel1_ba1 * (y) +                                   \
   xdim4_update_halo_kernel1_ba1 * ydim4_update_halo_kernel1_ba1 * (z))
#define OPS_ACC5(x, y, z)                                                      \
  (x + xdim5_update_halo_kernel1_ba1 * (y) +                                   \
   xdim5_update_halo_kernel1_ba1 * ydim5_update_halo_kernel1_ba1 * (z))
#define OPS_ACC6(x, y, z)                                                      \
  (x + xdim6_update_halo_kernel1_ba1 * (y) +                                   \
   xdim6_update_halo_kernel1_ba1 * ydim6_update_halo_kernel1_ba1 * (z))

// user function
__device__ inline void
update_halo_kernel1_ba1_gpu(double *density0, double *density1,
                            double *energy0, double *energy1, double *pressure,
                            double *viscosity, double *soundspeed,
                            const int *fields) {
  if (fields[FIELD_DENSITY0] == 1)
    density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 0, 1)];
  if (fields[FIELD_DENSITY1] == 1)
    density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 0, 1)];
  if (fields[FIELD_ENERGY0] == 1)
    energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 1)];
  if (fields[FIELD_ENERGY1] == 1)
    energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 1)];
  if (fields[FIELD_PRESSURE] == 1)
    pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 0, 1)];
  if (fields[FIELD_VISCOSITY] == 1)
    viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 0, 1)];
  if (fields[FIELD_SOUNDSPEED] == 1)
    soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 0, 1)];
}

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

__global__ void ops_update_halo_kernel1_ba1(
    double *__restrict arg0, double *__restrict arg1, double *__restrict arg2,
    double *__restrict arg3, double *__restrict arg4, double *__restrict arg5,
    double *__restrict arg6, const int *__restrict arg7, int size0, int size1,
    int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim0_update_halo_kernel1_ba1 *
              ydim0_update_halo_kernel1_ba1;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim1_update_halo_kernel1_ba1 *
              ydim1_update_halo_kernel1_ba1;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim2_update_halo_kernel1_ba1 *
              ydim2_update_halo_kernel1_ba1;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim3_update_halo_kernel1_ba1 *
              ydim3_update_halo_kernel1_ba1;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim4_update_halo_kernel1_ba1 *
              ydim4_update_halo_kernel1_ba1;
  arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim5_update_halo_kernel1_ba1 *
              ydim5_update_halo_kernel1_ba1;
  arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim6_update_halo_kernel1_ba1 *
              ydim6_update_halo_kernel1_ba1;

  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel1_ba1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6,
                                arg7);
  }
}

// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block,
                                          int dim, int *range, ops_arg arg0,
                                          ops_arg arg1, ops_arg arg2,
                                          ops_arg arg3, ops_arg arg4,
                                          ops_arg arg5, ops_arg arg6,
                                          ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_ba1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
#endif

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 8, range, 21))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(21, "update_halo_kernel1_ba1");
    OPS_kernels[21].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];

  if (xdim0 != xdim0_update_halo_kernel1_ba1_h ||
      ydim0 != ydim0_update_halo_kernel1_ba1_h ||
      xdim1 != xdim1_update_halo_kernel1_ba1_h ||
      ydim1 != ydim1_update_halo_kernel1_ba1_h ||
      xdim2 != xdim2_update_halo_kernel1_ba1_h ||
      ydim2 != ydim2_update_halo_kernel1_ba1_h ||
      xdim3 != xdim3_update_halo_kernel1_ba1_h ||
      ydim3 != ydim3_update_halo_kernel1_ba1_h ||
      xdim4 != xdim4_update_halo_kernel1_ba1_h ||
      ydim4 != ydim4_update_halo_kernel1_ba1_h ||
      xdim5 != xdim5_update_halo_kernel1_ba1_h ||
      ydim5 != ydim5_update_halo_kernel1_ba1_h ||
      xdim6 != xdim6_update_halo_kernel1_ba1_h ||
      ydim6 != ydim6_update_halo_kernel1_ba1_h) {
    hipMemcpyToSymbol(xdim0_update_halo_kernel1_ba1, &xdim0, sizeof(int));
    xdim0_update_halo_kernel1_ba1_h = xdim0;
    hipMemcpyToSymbol(ydim0_update_halo_kernel1_ba1, &ydim0, sizeof(int));
    ydim0_update_halo_kernel1_ba1_h = ydim0;
    hipMemcpyToSymbol(xdim1_update_halo_kernel1_ba1, &xdim1, sizeof(int));
    xdim1_update_halo_kernel1_ba1_h = xdim1;
    hipMemcpyToSymbol(ydim1_update_halo_kernel1_ba1, &ydim1, sizeof(int));
    ydim1_update_halo_kernel1_ba1_h = ydim1;
    hipMemcpyToSymbol(xdim2_update_halo_kernel1_ba1, &xdim2, sizeof(int));
    xdim2_update_halo_kernel1_ba1_h = xdim2;
    hipMemcpyToSymbol(ydim2_update_halo_kernel1_ba1, &ydim2, sizeof(int));
    ydim2_update_halo_kernel1_ba1_h = ydim2;
    hipMemcpyToSymbol(xdim3_update_halo_kernel1_ba1, &xdim3, sizeof(int));
    xdim3_update_halo_kernel1_ba1_h = xdim3;
    hipMemcpyToSymbol(ydim3_update_halo_kernel1_ba1, &ydim3, sizeof(int));
    ydim3_update_halo_kernel1_ba1_h = ydim3;
    hipMemcpyToSymbol(xdim4_update_halo_kernel1_ba1, &xdim4, sizeof(int));
    xdim4_update_halo_kernel1_ba1_h = xdim4;
    hipMemcpyToSymbol(ydim4_update_halo_kernel1_ba1, &ydim4, sizeof(int));
    ydim4_update_halo_kernel1_ba1_h = ydim4;
    hipMemcpyToSymbol(xdim5_update_halo_kernel1_ba1, &xdim5, sizeof(int));
    xdim5_update_halo_kernel1_ba1_h = xdim5;
    hipMemcpyToSymbol(ydim5_update_halo_kernel1_ba1, &ydim5, sizeof(int));
    ydim5_update_halo_kernel1_ba1_h = ydim5;
    hipMemcpyToSymbol(xdim6_update_halo_kernel1_ba1, &xdim6, sizeof(int));
    xdim6_update_halo_kernel1_ba1_h = xdim6;
    hipMemcpyToSymbol(ydim6_update_halo_kernel1_ba1, &ydim6, sizeof(int));
    ydim6_update_halo_kernel1_ba1_h = ydim6;
  }

  int *arg7h = (int *)arg7.data;

  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg7.data = OPS_consts_h + consts_bytes;
  arg7.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg7.data)[d] = arg7h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);

  char *p_a[8];

  // set up initial pointers
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
                      (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
                      (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
                      (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
                      (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
                      (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
              dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5 +
          dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
                      (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

  int base6 = args[6].dat->base_offset +
              dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6 +
          dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
  base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
                      (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;

#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 8);
  ops_halo_exchanges(args, 8, range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[21].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  hipLaunchKernelGGL(ops_update_halo_kernel1_ba1, dim3(grid), dim3(tblock), 0, 0,
                     (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
                     (double *)p_a[3], (double *)p_a[4], (double *)p_a[5],
                     (double *)p_a[6], (int *)arg7.data_d, x_size, y_size,
                     z_size);

  cutilSafeCall(hipGetLastError());

  if (OPS_diags > 1) {
    cutilSafeCall(hipDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[21].time += t1 - t2;
  }

#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 8);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  ops_set_halo_dirtybit3(&args[2], range);
  ops_set_halo_dirtybit3(&args[3], range);
  ops_set_halo_dirtybit3(&args[4], range);
  ops_set_halo_dirtybit3(&args[5], range);
  ops_set_halo_dirtybit3(&args[6], range);
#endif

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[21].mpi_time += t2 - t1;
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}

#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block,
                                          int dim, int *range, ops_arg arg0,
                                          ops_arg arg1, ops_arg arg2,
                                          ops_arg arg3, ops_arg arg4,
                                          ops_arg arg5, ops_arg arg6,
                                          ops_arg arg7) {
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 21;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 21;
  for (int i = 0; i < 6; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
  memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_ba1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(21, "update_halo_kernel1_ba1");
  }
  ops_enqueue_kernel(desc);
}
#endif
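The host stub above re-uploads the xdim*/ydim* extents to __constant__ memory only when they differ from the *_h host shadows, saving a device transfer on every repeat launch. Below is a minimal sketch of that caching idiom; the names (xdim_example, kernel_example, launch_with_dim) are illustrative, not part of the OPS API, and the CUDA spellings are used (the HIP build above spells it hipMemcpyToSymbol).

// Constant-memory caching: a __constant__ device variable paired with a
// host-side shadow initialized to -1, refreshed only when the value changes.
#include <cstdio>

__constant__ int xdim_example;
int xdim_example_h = -1; // host shadow; -1 means "never uploaded"

__global__ void kernel_example(int *out)
{
  out[0] = xdim_example; // kernels read the cached value from constant memory
}

void launch_with_dim(int *d_out, int xdim)
{
  if (xdim != xdim_example_h) { // upload only on change, as the host stub does
    cudaMemcpyToSymbol(xdim_example, &xdim, sizeof(int));
    xdim_example_h = xdim;
  }
  kernel_example<<<1, 1>>>(d_out);
}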
4f56008905242e7a3673f2e7e1cb7c243905656a.cu
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_ba1;
int xdim0_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim0_update_halo_kernel1_ba1;
int ydim0_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim1_update_halo_kernel1_ba1;
int xdim1_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim1_update_halo_kernel1_ba1;
int ydim1_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim2_update_halo_kernel1_ba1;
int xdim2_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim2_update_halo_kernel1_ba1;
int ydim2_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim3_update_halo_kernel1_ba1;
int xdim3_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim3_update_halo_kernel1_ba1;
int ydim3_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim4_update_halo_kernel1_ba1;
int xdim4_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim4_update_halo_kernel1_ba1;
int ydim4_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim5_update_halo_kernel1_ba1;
int xdim5_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim5_update_halo_kernel1_ba1;
int ydim5_update_halo_kernel1_ba1_h = -1;
__constant__ int xdim6_update_halo_kernel1_ba1;
int xdim6_update_halo_kernel1_ba1_h = -1;
__constant__ int ydim6_update_halo_kernel1_ba1;
int ydim6_update_halo_kernel1_ba1_h = -1;

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

#define OPS_ACC0(x, y, z)                                                      \
  (x + xdim0_update_halo_kernel1_ba1 * (y) +                                   \
   xdim0_update_halo_kernel1_ba1 * ydim0_update_halo_kernel1_ba1 * (z))
#define OPS_ACC1(x, y, z)                                                      \
  (x + xdim1_update_halo_kernel1_ba1 * (y) +                                   \
   xdim1_update_halo_kernel1_ba1 * ydim1_update_halo_kernel1_ba1 * (z))
#define OPS_ACC2(x, y, z)                                                      \
  (x + xdim2_update_halo_kernel1_ba1 * (y) +                                   \
   xdim2_update_halo_kernel1_ba1 * ydim2_update_halo_kernel1_ba1 * (z))
#define OPS_ACC3(x, y, z)                                                      \
  (x + xdim3_update_halo_kernel1_ba1 * (y) +                                   \
   xdim3_update_halo_kernel1_ba1 * ydim3_update_halo_kernel1_ba1 * (z))
#define OPS_ACC4(x, y, z)                                                      \
  (x + xdim4_update_halo_kernel1_ba1 * (y) +                                   \
   xdim4_update_halo_kernel1_ba1 * ydim4_update_halo_kernel1_ba1 * (z))
#define OPS_ACC5(x, y, z)                                                      \
  (x + xdim5_update_halo_kernel1_ba1 * (y) +                                   \
   xdim5_update_halo_kernel1_ba1 * ydim5_update_halo_kernel1_ba1 * (z))
#define OPS_ACC6(x, y, z)                                                      \
  (x + xdim6_update_halo_kernel1_ba1 * (y) +                                   \
   xdim6_update_halo_kernel1_ba1 * ydim6_update_halo_kernel1_ba1 * (z))

// user function
__device__ inline void
update_halo_kernel1_ba1_gpu(double *density0, double *density1,
                            double *energy0, double *energy1, double *pressure,
                            double *viscosity, double *soundspeed,
                            const int *fields) {
  if (fields[FIELD_DENSITY0] == 1)
    density0[OPS_ACC0(0, 0, 0)] = density0[OPS_ACC0(0, 0, 1)];
  if (fields[FIELD_DENSITY1] == 1)
    density1[OPS_ACC1(0, 0, 0)] = density1[OPS_ACC1(0, 0, 1)];
  if (fields[FIELD_ENERGY0] == 1)
    energy0[OPS_ACC2(0, 0, 0)] = energy0[OPS_ACC2(0, 0, 1)];
  if (fields[FIELD_ENERGY1] == 1)
    energy1[OPS_ACC3(0, 0, 0)] = energy1[OPS_ACC3(0, 0, 1)];
  if (fields[FIELD_PRESSURE] == 1)
    pressure[OPS_ACC4(0, 0, 0)] = pressure[OPS_ACC4(0, 0, 1)];
  if (fields[FIELD_VISCOSITY] == 1)
    viscosity[OPS_ACC5(0, 0, 0)] = viscosity[OPS_ACC5(0, 0, 1)];
  if (fields[FIELD_SOUNDSPEED] == 1)
    soundspeed[OPS_ACC6(0, 0, 0)] = soundspeed[OPS_ACC6(0, 0, 1)];
}

#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6

__global__ void ops_update_halo_kernel1_ba1(
    double *__restrict arg0, double *__restrict arg1, double *__restrict arg2,
    double *__restrict arg3, double *__restrict arg4, double *__restrict arg5,
    double *__restrict arg6, const int *__restrict arg7, int size0, int size1,
    int size2) {

  int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
  int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
  int idx_x = blockDim.x * blockIdx.x + threadIdx.x;

  arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim0_update_halo_kernel1_ba1 *
              ydim0_update_halo_kernel1_ba1;
  arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim1_update_halo_kernel1_ba1 *
              ydim1_update_halo_kernel1_ba1;
  arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim2_update_halo_kernel1_ba1 *
              ydim2_update_halo_kernel1_ba1;
  arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim3_update_halo_kernel1_ba1 *
              ydim3_update_halo_kernel1_ba1;
  arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim4_update_halo_kernel1_ba1 *
              ydim4_update_halo_kernel1_ba1;
  arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim5_update_halo_kernel1_ba1 *
              ydim5_update_halo_kernel1_ba1;
  arg6 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim6_update_halo_kernel1_ba1 +
          idx_z * 1 * 1 * xdim6_update_halo_kernel1_ba1 *
              ydim6_update_halo_kernel1_ba1;

  if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
    update_halo_kernel1_ba1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6,
                                arg7);
  }
}

// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block,
                                          int dim, int *range, ops_arg arg0,
                                          ops_arg arg1, ops_arg arg2,
                                          ops_arg arg3, ops_arg arg4,
                                          ops_arg arg5, ops_arg arg6,
                                          ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_ba1_execute(ops_kernel_descriptor *desc) {
  int dim = desc->dim;
  int *range = desc->range;
  ops_arg arg0 = desc->args[0];
  ops_arg arg1 = desc->args[1];
  ops_arg arg2 = desc->args[2];
  ops_arg arg3 = desc->args[3];
  ops_arg arg4 = desc->args[4];
  ops_arg arg5 = desc->args[5];
  ops_arg arg6 = desc->args[6];
  ops_arg arg7 = desc->args[7];
#endif

  // Timing
  double t1, t2, c1, c2;

  ops_arg args[8] = {arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};

#if CHECKPOINTING && !OPS_LAZY
  if (!ops_checkpointing_before(args, 8, range, 21))
    return;
#endif

  if (OPS_diags > 1) {
    ops_timing_realloc(21, "update_halo_kernel1_ba1");
    OPS_kernels[21].count++;
    ops_timers_core(&c1, &t1);
  }

  // compute locally allocated range for the sub-block
  int start[3];
  int end[3];
#if OPS_MPI && !OPS_LAZY
  sub_block_list sb = OPS_sub_block_list[block->index];
  if (!sb->owned)
    return;
  for (int n = 0; n < 3; n++) {
    start[n] = sb->decomp_disp[n];
    end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
    if (start[n] >= range[2 * n]) {
      start[n] = 0;
    } else {
      start[n] = range[2 * n] - start[n];
    }
    if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
      start[n] = range[2 * n];
    if (end[n] >= range[2 * n + 1]) {
      end[n] = range[2 * n + 1] - sb->decomp_disp[n];
    } else {
      end[n] = sb->decomp_size[n];
    }
    if (sb->id_p[n] == MPI_PROC_NULL &&
        (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
      end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
  }
#else
  for (int n = 0; n < 3; n++) {
    start[n] = range[2 * n];
    end[n] = range[2 * n + 1];
  }
#endif

  int x_size = MAX(0, end[0] - start[0]);
  int y_size = MAX(0, end[1] - start[1]);
  int z_size = MAX(0, end[2] - start[2]);

  int xdim0 = args[0].dat->size[0];
  int ydim0 = args[0].dat->size[1];
  int xdim1 = args[1].dat->size[0];
  int ydim1 = args[1].dat->size[1];
  int xdim2 = args[2].dat->size[0];
  int ydim2 = args[2].dat->size[1];
  int xdim3 = args[3].dat->size[0];
  int ydim3 = args[3].dat->size[1];
  int xdim4 = args[4].dat->size[0];
  int ydim4 = args[4].dat->size[1];
  int xdim5 = args[5].dat->size[0];
  int ydim5 = args[5].dat->size[1];
  int xdim6 = args[6].dat->size[0];
  int ydim6 = args[6].dat->size[1];

  if (xdim0 != xdim0_update_halo_kernel1_ba1_h ||
      ydim0 != ydim0_update_halo_kernel1_ba1_h ||
      xdim1 != xdim1_update_halo_kernel1_ba1_h ||
      ydim1 != ydim1_update_halo_kernel1_ba1_h ||
      xdim2 != xdim2_update_halo_kernel1_ba1_h ||
      ydim2 != ydim2_update_halo_kernel1_ba1_h ||
      xdim3 != xdim3_update_halo_kernel1_ba1_h ||
      ydim3 != ydim3_update_halo_kernel1_ba1_h ||
      xdim4 != xdim4_update_halo_kernel1_ba1_h ||
      ydim4 != ydim4_update_halo_kernel1_ba1_h ||
      xdim5 != xdim5_update_halo_kernel1_ba1_h ||
      ydim5 != ydim5_update_halo_kernel1_ba1_h ||
      xdim6 != xdim6_update_halo_kernel1_ba1_h ||
      ydim6 != ydim6_update_halo_kernel1_ba1_h) {
    cudaMemcpyToSymbol(xdim0_update_halo_kernel1_ba1, &xdim0, sizeof(int));
    xdim0_update_halo_kernel1_ba1_h = xdim0;
    cudaMemcpyToSymbol(ydim0_update_halo_kernel1_ba1, &ydim0, sizeof(int));
    ydim0_update_halo_kernel1_ba1_h = ydim0;
    cudaMemcpyToSymbol(xdim1_update_halo_kernel1_ba1, &xdim1, sizeof(int));
    xdim1_update_halo_kernel1_ba1_h = xdim1;
    cudaMemcpyToSymbol(ydim1_update_halo_kernel1_ba1, &ydim1, sizeof(int));
    ydim1_update_halo_kernel1_ba1_h = ydim1;
    cudaMemcpyToSymbol(xdim2_update_halo_kernel1_ba1, &xdim2, sizeof(int));
    xdim2_update_halo_kernel1_ba1_h = xdim2;
    cudaMemcpyToSymbol(ydim2_update_halo_kernel1_ba1, &ydim2, sizeof(int));
    ydim2_update_halo_kernel1_ba1_h = ydim2;
    cudaMemcpyToSymbol(xdim3_update_halo_kernel1_ba1, &xdim3, sizeof(int));
    xdim3_update_halo_kernel1_ba1_h = xdim3;
    cudaMemcpyToSymbol(ydim3_update_halo_kernel1_ba1, &ydim3, sizeof(int));
    ydim3_update_halo_kernel1_ba1_h = ydim3;
    cudaMemcpyToSymbol(xdim4_update_halo_kernel1_ba1, &xdim4, sizeof(int));
    xdim4_update_halo_kernel1_ba1_h = xdim4;
    cudaMemcpyToSymbol(ydim4_update_halo_kernel1_ba1, &ydim4, sizeof(int));
    ydim4_update_halo_kernel1_ba1_h = ydim4;
    cudaMemcpyToSymbol(xdim5_update_halo_kernel1_ba1, &xdim5, sizeof(int));
    xdim5_update_halo_kernel1_ba1_h = xdim5;
    cudaMemcpyToSymbol(ydim5_update_halo_kernel1_ba1, &ydim5, sizeof(int));
    ydim5_update_halo_kernel1_ba1_h = ydim5;
    cudaMemcpyToSymbol(xdim6_update_halo_kernel1_ba1, &xdim6, sizeof(int));
    xdim6_update_halo_kernel1_ba1_h = xdim6;
    cudaMemcpyToSymbol(ydim6_update_halo_kernel1_ba1, &ydim6, sizeof(int));
    ydim6_update_halo_kernel1_ba1_h = ydim6;
  }

  int *arg7h = (int *)arg7.data;

  dim3 grid((x_size - 1) / OPS_block_size_x + 1,
            (y_size - 1) / OPS_block_size_y + 1, z_size);
  dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);

  int consts_bytes = 0;
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  reallocConstArrays(consts_bytes);
  consts_bytes = 0;
  arg7.data = OPS_consts_h + consts_bytes;
  arg7.data_d = OPS_consts_d + consts_bytes;
  for (int d = 0; d < NUM_FIELDS; d++)
    ((int *)arg7.data)[d] = arg7h[d];
  consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
  mvConstArraysToDevice(consts_bytes);

  int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
  int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
  int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
  int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
  int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
  int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
  int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);

  char *p_a[8];

  // set up initial pointers
  int base0 = args[0].dat->base_offset +
              dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
  base0 = base0 +
          dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
  base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
                      (start[2] * args[0].stencil->stride[2]);
  p_a[0] = (char *)args[0].data_d + base0;

  int base1 = args[1].dat->base_offset +
              dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
  base1 = base1 +
          dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
  base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
                      (start[2] * args[1].stencil->stride[2]);
  p_a[1] = (char *)args[1].data_d + base1;

  int base2 = args[2].dat->base_offset +
              dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
  base2 = base2 +
          dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
  base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
                      (start[2] * args[2].stencil->stride[2]);
  p_a[2] = (char *)args[2].data_d + base2;

  int base3 = args[3].dat->base_offset +
              dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
  base3 = base3 +
          dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
  base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
                      (start[2] * args[3].stencil->stride[2]);
  p_a[3] = (char *)args[3].data_d + base3;

  int base4 = args[4].dat->base_offset +
              dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
  base4 = base4 +
          dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
  base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
                      (start[2] * args[4].stencil->stride[2]);
  p_a[4] = (char *)args[4].data_d + base4;

  int base5 = args[5].dat->base_offset +
              dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
  base5 = base5 +
          dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
  base5 = base5 + dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
                      (start[2] * args[5].stencil->stride[2]);
  p_a[5] = (char *)args[5].data_d + base5;

  int base6 = args[6].dat->base_offset +
              dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
  base6 = base6 +
          dat6 * args[6].dat->size[0] * (start[1] * args[6].stencil->stride[1]);
  base6 = base6 + dat6 * args[6].dat->size[0] * args[6].dat->size[1] *
                      (start[2] * args[6].stencil->stride[2]);
  p_a[6] = (char *)args[6].data_d + base6;

#ifndef OPS_LAZY
  ops_H_D_exchanges_device(args, 8);
  ops_halo_exchanges(args, 8, range);
#endif

  if (OPS_diags > 1) {
    ops_timers_core(&c2, &t2);
    OPS_kernels[21].mpi_time += t2 - t1;
  }

  // call kernel wrapper function, passing in pointers to data
  ops_update_halo_kernel1_ba1<<<grid, tblock>>>(
      (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
      (double *)p_a[4], (double *)p_a[5], (double *)p_a[6],
      (int *)arg7.data_d, x_size, y_size, z_size);

  cutilSafeCall(cudaGetLastError());

  if (OPS_diags > 1) {
    cutilSafeCall(cudaDeviceSynchronize());
    ops_timers_core(&c1, &t1);
    OPS_kernels[21].time += t1 - t2;
  }

#ifndef OPS_LAZY
  ops_set_dirtybit_device(args, 8);
  ops_set_halo_dirtybit3(&args[0], range);
  ops_set_halo_dirtybit3(&args[1], range);
  ops_set_halo_dirtybit3(&args[2], range);
  ops_set_halo_dirtybit3(&args[3], range);
  ops_set_halo_dirtybit3(&args[4], range);
  ops_set_halo_dirtybit3(&args[5], range);
  ops_set_halo_dirtybit3(&args[6], range);
#endif

  if (OPS_diags > 1) {
    // Update kernel record
    ops_timers_core(&c2, &t2);
    OPS_kernels[21].mpi_time += t2 - t1;
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg0);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg1);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg2);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg3);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg4);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg5);
    OPS_kernels[21].transfer += ops_compute_transfer(dim, start, end, &arg6);
  }
}

#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_ba1(char const *name, ops_block block,
                                          int dim, int *range, ops_arg arg0,
                                          ops_arg arg1, ops_arg arg2,
                                          ops_arg arg3, ops_arg arg4,
                                          ops_arg arg5, ops_arg arg6,
                                          ops_arg arg7) {
  ops_kernel_descriptor *desc =
      (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
  desc->name = name;
  desc->block = block;
  desc->dim = dim;
  desc->device = 1;
  desc->index = 21;
  desc->hash = 5381;
  desc->hash = ((desc->hash << 5) + desc->hash) + 21;
  for (int i = 0; i < 6; i++) {
    desc->range[i] = range[i];
    desc->orig_range[i] = range[i];
    desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
  }
  desc->nargs = 8;
  desc->args = (ops_arg *)malloc(8 * sizeof(ops_arg));
  desc->args[0] = arg0;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
  desc->args[1] = arg1;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
  desc->args[2] = arg2;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
  desc->args[3] = arg3;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
  desc->args[4] = arg4;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
  desc->args[5] = arg5;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
  desc->args[6] = arg6;
  desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
  desc->args[7] = arg7;
  char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int));
  memcpy(tmp, arg7.data, NUM_FIELDS * sizeof(int));
  desc->args[7].data = tmp;
  desc->function = ops_par_loop_update_halo_kernel1_ba1_execute;
  if (OPS_diags > 1) {
    ops_timing_realloc(21, "update_halo_kernel1_ba1");
  }
  ops_enqueue_kernel(desc);
}
#endif
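Both generated variants compute base0..base6 the same way: a byte offset into a padded 3-D dat at (start[0], start[1], start[2]), i.e. elem * (x + sizeX * (y + sizeY * z)). A compact sketch of that arithmetic follows, using a stand-in struct (fake_dat, arg_pointer) rather than the real ops_dat type.

// Base-offset arithmetic for a padded, row-major 3-D array, in bytes.
#include <cstddef>

struct fake_dat {        // illustrative stand-in for an OPS dat
  int elem_size;         // bytes per element
  int size[3];           // padded extents, including halos
  char *data_d;          // device base pointer
};

static char *arg_pointer(const fake_dat &dat, const int start[3])
{
  size_t base = (size_t)dat.elem_size * start[0];
  base += (size_t)dat.elem_size * dat.size[0] * start[1];
  base += (size_t)dat.elem_size * dat.size[0] * dat.size[1] * start[2];
  return dat.data_d + base; // same shape as the base0..base6 computations
}

int main(void)
{
  char buffer[8 * 4 * 3 * sizeof(double)] = {0};
  fake_dat dat = {(int)sizeof(double), {8, 4, 3}, buffer};
  int start[3] = {1, 2, 0};
  (void)arg_pointer(dat, start);
  return 0;
}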
c97526bfe5657eb0f2e171d9605595be3cc3c4a3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <stdlib.h>
#include <cmath>

#include "hl_base.h"
#include "hl_cuda.h"
#include "hl_perturbation_util.cuh"
#include "hl_time.h"

#define _USE_MATH_DEFINES

/*
 * Get the original coordinate for a pixel in a transformed image.
 * x, y: coordinate in the transformed image.
 * tgtCenter: the center coordinate of the transformed image.
 * imgCenter: the center coordinate of the source image.
 * centerR, centerC: translation.
 * sourceX, sourceY: output coordinates in the original image.
 */
__device__ void getTranformCoord(int x, int y, real theta, real scale,
                                 real tgtCenter, real imgCenter, real centerR,
                                 real centerC, int* sourceX, int* sourceY) {
  real H[4] = {cosf(-theta), -sinf(-theta), sinf(-theta), cosf(-theta)};

  // compute coordinates in the rotated and scaled image
  real x_new = x - tgtCenter + centerC;
  real y_new = y - tgtCenter + centerR;

  // compute coordinates in the original image
  x_new -= imgCenter;
  y_new -= imgCenter;
  real xx = H[0] * x_new + H[1] * y_new;
  real yy = H[2] * x_new + H[3] * y_new;

  *sourceX = __float2int_rn(xx / scale + imgCenter);
  *sourceY = __float2int_rn(yy / scale + imgCenter);
}

/*
 * imgs: (numImages, imgPixels)
 * target: (numImages * samplingRate, tgtPixels)
 * the channels of one pixel are stored continuously in memory.
 *
 * created by Wei Xu (genome), converted by Jiang Wang
 */
__global__ void kSamplingPatches(const real* imgs, real* targets, int imgSize,
                                 int tgtSize, const int channels,
                                 int samplingRate, const real* thetas,
                                 const real* scales, const int* centerRs,
                                 const int* centerCs, const real padValue,
                                 const int numImages) {
  const int caseIdx = blockIdx.x * 4 + threadIdx.x;
  const int pxIdx = blockIdx.y * 128 + threadIdx.y;
  const int imgPixels = imgSize * imgSize;
  const int tgtPixels = tgtSize * tgtSize;
  const int numPatches = numImages * samplingRate;

  real tgtCenter = (tgtSize - 1) / 2;
  real imgCenter = (imgSize - 1) / 2;

  if (pxIdx < tgtPixels && caseIdx < numPatches) {
    const int imgIdx = caseIdx / samplingRate;

    // transform coordinates
    const int pxX = pxIdx % tgtSize;
    const int pxY = pxIdx / tgtSize;

    int srcPxX, srcPxY;
    getTranformCoord(pxX, pxY, thetas[imgIdx], scales[imgIdx], tgtCenter,
                     imgCenter, centerCs[caseIdx], centerRs[caseIdx], &srcPxX,
                     &srcPxY);

    imgs += (imgIdx * imgPixels + srcPxY * imgSize + srcPxX) * channels;
    targets += (caseIdx * tgtPixels + pxIdx) * channels;

    if (srcPxX >= 0 && srcPxX < imgSize && srcPxY >= 0 && srcPxY < imgSize) {
      for (int j = 0; j < channels; j++) targets[j] = imgs[j];
    } else {
      for (int j = 0; j < channels; j++) targets[j] = padValue;
    }
  }
}

/*
 * Functionality: generate the disturb (rotation and scaling) and
 *                sampling location sequence
 *
 * created by Wei Xu
 */
void hl_generate_disturb_params(real*& gpuAngle, real*& gpuScaleRatio,
                                int*& gpuCenterR, int*& gpuCenterC,
                                int numImages, int imgSize, real rotateAngle,
                                real scaleRatio, int samplingRate,
                                bool isTrain) {
  // The number of output samples.
  int numPatches = numImages * samplingRate;

  // create CPU perturbation parameters.
  real* r_angle = new real[numImages];
  real* s_ratio = new real[numImages];
  int* center_r = new int[numPatches];
  int* center_c = new int[numPatches];

  // generate the random disturbance sequence and the sampling locations
  if (isTrain) {  // random sampling for training
    // generate rotation and scaling parameters
    // TODO(yuyang18): Since it will initialize random seed here, we can use
    // rand_r instead of rand to make this method thread safe.
    srand(getCurrentTimeStick());
    for (int i = 0; i < numImages; i++) {
      r_angle[i] =
          (rotateAngle * M_PI / 180.0) * (rand() / (RAND_MAX + 1.0)  // NOLINT
                                          - 0.5);
      s_ratio[i] =
          1 + (rand() / (RAND_MAX + 1.0) - 0.5) * scaleRatio;  // NOLINT
    }

    int imgCenter = (imgSize - 1) / 2;

    // generate sampling location parameters
    for (int i = 0; i < numImages; i++) {
      int j = 0;
      srand((unsigned)time(NULL));
      while (j < samplingRate) {
        int pxX =
            (int)(real(imgSize - 1) * rand() / (RAND_MAX + 1.0));  // NOLINT
        int pxY =
            (int)(real(imgSize - 1) * rand() / (RAND_MAX + 1.0));  // NOLINT

        const real H[4] = {cos(-r_angle[i]),
                           -sin(-r_angle[i]),
                           sin(-r_angle[i]),
                           cos(-r_angle[i])};
        real x = pxX - imgCenter;
        real y = pxY - imgCenter;
        real xx = H[0] * x + H[1] * y;
        real yy = H[2] * x + H[3] * y;
        real srcPxX = xx / s_ratio[i] + imgCenter;
        real srcPxY = yy / s_ratio[i] + imgCenter;

        if (srcPxX >= 0 && srcPxX <= imgSize - 1 && srcPxY >= 0 &&
            srcPxY <= imgSize - 1) {
          center_r[i * samplingRate + j] = pxY;
          center_c[i * samplingRate + j] = pxX;
          j++;
        }
      }
    }
  } else {  // central crop for testing
    for (int i = 0; i < numImages; i++) {
      r_angle[i] = 0.0;
      s_ratio[i] = 1.0;
      for (int j = 0; j < samplingRate; j++) {
        center_r[i * samplingRate + j] = (imgSize - 1) / 2;
        center_c[i * samplingRate + j] = (imgSize - 1) / 2;
      }
    }
  }

  // copy disturbance sequence to gpu
  hl_memcpy_host2device(gpuAngle, r_angle, sizeof(real) * numImages);
  hl_memcpy_host2device(gpuScaleRatio, s_ratio, sizeof(real) * numImages);
  delete[] r_angle;
  delete[] s_ratio;

  // copy sampling location sequence to gpu
  hl_memcpy_host2device(gpuCenterR, center_r, sizeof(int) * numPatches);
  hl_memcpy_host2device(gpuCenterC, center_c, sizeof(int) * numPatches);
  delete[] center_r;
  delete[] center_c;
}

void hl_conv_random_disturb_with_params(const real* images, int imgSize,
                                        int tgtSize, int channels,
                                        int numImages, int samplingRate,
                                        const real* gpuRotationAngle,
                                        const real* gpuScaleRatio,
                                        const int* gpuCenterR,
                                        const int* gpuCenterC,
                                        int paddingValue, real* target) {
  // The number of output samples.
  int numPatches = numImages * samplingRate;

  // The memory size of one output patch.
  int targetSize = tgtSize * tgtSize;

  dim3 threadsPerBlock(4, 128);
  dim3 numBlocks(DIVUP(numPatches, 4), DIVUP(targetSize, 128));

  hipLaunchKernelGGL(kSamplingPatches, dim3(numBlocks), dim3(threadsPerBlock),
                     0, 0, images, target, imgSize, tgtSize, channels,
                     samplingRate, gpuRotationAngle, gpuScaleRatio, gpuCenterR,
                     gpuCenterC, paddingValue, numImages);

  hl_device_synchronize();
}

void hl_conv_random_disturb(const real* images, int imgSize, int tgtSize,
                            int channels, int numImages, real scaleRatio,
                            real rotateAngle, int samplingRate,
                            real* gpu_r_angle, real* gpu_s_ratio,
                            int* gpu_center_r, int* gpu_center_c,
                            int paddingValue, bool isTrain, real* targets) {
  // generate the random disturbance sequence and the sampling locations
  hl_generate_disturb_params(gpu_r_angle, gpu_s_ratio, gpu_center_r,
                             gpu_center_c, numImages, imgSize, rotateAngle,
                             scaleRatio, samplingRate, isTrain);

  hl_conv_random_disturb_with_params(images, imgSize, tgtSize, channels,
                                     numImages, samplingRate, gpu_r_angle,
                                     gpu_s_ratio, gpu_center_r, gpu_center_c,
                                     paddingValue, targets);
}
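hl_generate_disturb_params keeps drawing candidate patch centers until the inverse rotation/scale maps the center back inside the image. The following is a host-only sketch of that rejection loop; draw_unit() and sample_center() are hypothetical stand-ins for the rand()-based draws above, not Paddle APIs.

// Rejection sampling of one valid patch center for a given angle and scale.
#include <cmath>
#include <cstdlib>

static double draw_unit(void) { return rand() / (RAND_MAX + 1.0); }

static void sample_center(int imgSize, double angle, double scale,
                          int *outR, int *outC)
{
  const int c = (imgSize - 1) / 2;
  for (;;) {
    int pxX = (int)((imgSize - 1) * draw_unit());
    int pxY = (int)((imgSize - 1) * draw_unit());
    double x = pxX - c, y = pxY - c;
    // inverse transform, same H matrix as hl_generate_disturb_params
    double xx = cos(-angle) * x - sin(-angle) * y;
    double yy = sin(-angle) * x + cos(-angle) * y;
    double srcX = xx / scale + c, srcY = yy / scale + c;
    if (srcX >= 0 && srcX <= imgSize - 1 && srcY >= 0 && srcY <= imgSize - 1) {
      *outR = pxY; // row = y, column = x, matching center_r / center_c
      *outC = pxX;
      return;
    }
  }
}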
c97526bfe5657eb0f2e171d9605595be3cc3c4a3.cu
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdlib.h> #include <cmath> #include "hl_base.h" #include "hl_cuda.h" #include "hl_perturbation_util.cuh" #include "hl_time.h" #define _USE_MATH_DEFINES /* * Get the original coordinate for a pixel in a transformed image. * x, y: coordiate in the transformed image. * tgtCenter: the center coordiate of the transformed image. * imgSCenter: the center coordinate of the source image. * centerX, centerY: translation. * sourceX, sourceY: output coordinates in the original image. */ __device__ void getTranformCoord(int x, int y, real theta, real scale, real tgtCenter, real imgCenter, real centerR, real centerC, int* sourceX, int* sourceY) { real H[4] = {cosf(-theta), -sinf(-theta), sinf(-theta), cosf(-theta)}; // compute coornidates in the rotated and scaled image real x_new = x - tgtCenter + centerC; real y_new = y - tgtCenter + centerR; // compute coornidates in the original image x_new -= imgCenter; y_new -= imgCenter; real xx = H[0] * x_new + H[1] * y_new; real yy = H[2] * x_new + H[3] * y_new; *sourceX = __float2int_rn(xx / scale + imgCenter); *sourceY = __float2int_rn(yy / scale + imgCenter); } /* * imgs: (numImages, imgPixels) * target: (numImages * samplingRate, tgtPixels) * the channels of one pixel are stored continuously in memory. * * created by Wei Xu (genome), converted by Jiang Wang */ __global__ void kSamplingPatches(const real* imgs, real* targets, int imgSize, int tgtSize, const int channels, int samplingRate, const real* thetas, const real* scales, const int* centerRs, const int* centerCs, const real padValue, const int numImages) { const int caseIdx = blockIdx.x * 4 + threadIdx.x; const int pxIdx = blockIdx.y * 128 + threadIdx.y; const int imgPixels = imgSize * imgSize; const int tgtPixels = tgtSize * tgtSize; const int numPatches = numImages * samplingRate; real tgtCenter = (tgtSize - 1) / 2; real imgCenter = (imgSize - 1) / 2; if (pxIdx < tgtPixels && caseIdx < numPatches) { const int imgIdx = caseIdx / samplingRate; // transform coordiates const int pxX = pxIdx % tgtSize; const int pxY = pxIdx / tgtSize; int srcPxX, srcPxY; getTranformCoord(pxX, pxY, thetas[imgIdx], scales[imgIdx], tgtCenter, imgCenter, centerCs[caseIdx], centerRs[caseIdx], &srcPxX, &srcPxY); imgs += (imgIdx * imgPixels + srcPxY * imgSize + srcPxX) * channels; targets += (caseIdx * tgtPixels + pxIdx) * channels; if (srcPxX >= 0 && srcPxX < imgSize && srcPxY >= 0 && srcPxY < imgSize) { for (int j = 0; j < channels; j++) targets[j] = imgs[j]; } else { for (int j = 0; j < channels; j++) targets[j] = padValue; } } } /* * Functionality: generate the disturb (rotation and scaling) and * sampling location sequence * * created by Wei Xu */ void hl_generate_disturb_params(real*& gpuAngle, real*& gpuScaleRatio, int*& gpuCenterR, int*& gpuCenterC, int numImages, int imgSize, real rotateAngle, real scaleRatio, int samplingRate, bool isTrain) { // The number of output samples. 
int numPatches = numImages * samplingRate; // create CPU perturbation parameters. real* r_angle = new real[numImages]; real* s_ratio = new real[numImages]; int* center_r = new int[numPatches]; int* center_c = new int[numPatches]; // generate the random disturbance sequence and the sampling locations if (isTrain) { // random sampling for training // generate rotation ans scaling parameters // TODO(yuyang18): Since it will initialize random seed here, we can use // rand_r instead of rand to make this method thread safe. srand(getCurrentTimeStick()); for (int i = 0; i < numImages; i++) { r_angle[i] = (rotateAngle * M_PI / 180.0) * (rand() / (RAND_MAX + 1.0) // NOLINT - 0.5); s_ratio[i] = 1 + (rand() / (RAND_MAX + 1.0) - 0.5) * scaleRatio; // NOLINT } int imgCenter = (imgSize - 1) / 2; // generate sampling location parameters for (int i = 0; i < numImages; i++) { int j = 0; srand((unsigned)time(NULL)); while (j < samplingRate) { int pxX = (int)(real(imgSize - 1) * rand() / (RAND_MAX + 1.0)); // NOLINT int pxY = (int)(real(imgSize - 1) * rand() / (RAND_MAX + 1.0)); // NOLINT const real H[4] = {cos(-r_angle[i]), -sin(-r_angle[i]), sin(-r_angle[i]), cos(-r_angle[i])}; real x = pxX - imgCenter; real y = pxY - imgCenter; real xx = H[0] * x + H[1] * y; real yy = H[2] * x + H[3] * y; real srcPxX = xx / s_ratio[i] + imgCenter; real srcPxY = yy / s_ratio[i] + imgCenter; if (srcPxX >= 0 && srcPxX <= imgSize - 1 && srcPxY >= 0 && srcPxY <= imgSize - 1) { center_r[i * samplingRate + j] = pxY; center_c[i * samplingRate + j] = pxX; j++; } } } } else { // central crop for testing for (int i = 0; i < numImages; i++) { r_angle[i] = 0.0; s_ratio[i] = 1.0; for (int j = 0; j < samplingRate; j++) { center_r[i * samplingRate + j] = (imgSize - 1) / 2; center_c[i * samplingRate + j] = (imgSize - 1) / 2; } } } // copy disturbance sequence to gpu hl_memcpy_host2device(gpuAngle, r_angle, sizeof(real) * numImages); hl_memcpy_host2device(gpuScaleRatio, s_ratio, sizeof(real) * numImages); delete[] r_angle; delete[] s_ratio; // copy sampling location sequence to gpu hl_memcpy_host2device(gpuCenterR, center_r, sizeof(int) * numPatches); hl_memcpy_host2device(gpuCenterC, center_c, sizeof(int) * numPatches); delete[] center_r; delete[] center_c; } void hl_conv_random_disturb_with_params(const real* images, int imgSize, int tgtSize, int channels, int numImages, int samplingRate, const real* gpuRotationAngle, const real* gpuScaleRatio, const int* gpuCenterR, const int* gpuCenterC, int paddingValue, real* target) { // The number of output samples. int numPatches = numImages * samplingRate; // The memory size of one output patch. 
int targetSize = tgtSize * tgtSize; dim3 threadsPerBlock(4, 128); dim3 numBlocks(DIVUP(numPatches, 4), DIVUP(targetSize, 128)); kSamplingPatches<<<numBlocks, threadsPerBlock>>>(images, target, imgSize, tgtSize, channels, samplingRate, gpuRotationAngle, gpuScaleRatio, gpuCenterR, gpuCenterC, paddingValue, numImages); hl_device_synchronize(); } void hl_conv_random_disturb(const real* images, int imgSize, int tgtSize, int channels, int numImages, real scaleRatio, real rotateAngle, int samplingRate, real* gpu_r_angle, real* gpu_s_ratio, int* gpu_center_r, int* gpu_center_c, int paddingValue, bool isTrain, real* targets) { // generate the random disturbance sequence and the sampling locations hl_generate_disturb_params(gpu_r_angle, gpu_s_ratio, gpu_center_r, gpu_center_c, numImages, imgSize, rotateAngle, scaleRatio, samplingRate, isTrain); hl_conv_random_disturb_with_params(images, imgSize, tgtSize, channels, numImages, samplingRate, gpu_r_angle, gpu_s_ratio, gpu_center_r, gpu_center_c, paddingValue, targets); }
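A note on the geometry above: getTranformCoord applies the inverse transform, rotating by -theta and dividing by scale, so every target-patch pixel is pulled from its source location rather than scattered forward. A minimal host-side sketch of the same arithmetic, standalone and with illustrative values only:

#include <cmath>
#include <cstdio>

// Same inverse rotate/scale mapping as getTranformCoord, on the host.
static void inverseMap(float x, float y, float theta, float scale,
                       float tgtCenter, float imgCenter,
                       float centerR, float centerC, int *srcX, int *srcY) {
  float h00 = cosf(-theta), h01 = -sinf(-theta);   // rotation by -theta
  float h10 = sinf(-theta), h11 = cosf(-theta);
  float xn = x - tgtCenter + centerC - imgCenter;  // translate, then center
  float yn = y - tgtCenter + centerR - imgCenter;
  *srcX = (int)lrintf((h00 * xn + h01 * yn) / scale + imgCenter);
  *srcY = (int)lrintf((h10 * xn + h11 * yn) / scale + imgCenter);
}

int main(void) {
  int sx, sy;  // with theta = 0, scale = 1 and equal centers the map is the identity
  inverseMap(3.f, 4.f, 0.f, 1.f, 15.5f, 15.5f, 15.5f, 15.5f, &sx, &sy);
  printf("(3,4) -> (%d,%d)\n", sx, sy);  // expect (3,4)
  return 0;
}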
dea1533f55ff77d99d58c16620cf9a7cb3b61644.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* DATA_SIZE = BLOCK_SIZE * GRID_SIZE; DATA_SIZE must be evenly divisible by BLOCK_SIZE (not checked by the program) */ #define DATA_SIZE 16 #define BLOCK_SIZE 8 #define GRID_SIZE (DATA_SIZE/BLOCK_SIZE) __global__ void helloFromGPU() { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("I am blockDim.x=%3d, blockIdx.x=%3d, threadIdx.x=%3d. My target is %3d.\n", blockDim.x, blockIdx.x, threadIdx.x, id); } int main(void) { printf("Hello World from CPU! DATA_SIZE(%d) = BLOCK_SIZE(%d) x GRID_SIZE(%d).\n", DATA_SIZE, BLOCK_SIZE, GRID_SIZE); hipLaunchKernelGGL(( helloFromGPU) , dim3(GRID_SIZE), dim3(BLOCK_SIZE), 0, 0, ); hipDeviceSynchronize(); printf("Goodbye World from CPU!\n"); hipDeviceReset(); return 0; }
dea1533f55ff77d99d58c16620cf9a7cb3b61644.cu
#include <stdio.h> /* DATA_SIZE = BLOCK_SIZE * GRID_SIZE; DATA_SIZE must be evenly divisible by BLOCK_SIZE (not checked by the program) */ #define DATA_SIZE 16 #define BLOCK_SIZE 8 #define GRID_SIZE (DATA_SIZE/BLOCK_SIZE) __global__ void helloFromGPU() { int id = blockDim.x * blockIdx.x + threadIdx.x; printf("I am blockDim.x=%3d, blockIdx.x=%3d, threadIdx.x=%3d. My target is %3d.\n", blockDim.x, blockIdx.x, threadIdx.x, id); } int main(void) { printf("Hello World from CPU! DATA_SIZE(%d) = BLOCK_SIZE(%d) x GRID_SIZE(%d).\n", DATA_SIZE, BLOCK_SIZE, GRID_SIZE); helloFromGPU <<<GRID_SIZE, BLOCK_SIZE>>> (); cudaDeviceSynchronize(); printf("Goodbye World from CPU!\n"); cudaDeviceReset(); return 0; }
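The translated comment above flags that nothing checks the divisibility assumption; if DATA_SIZE were, say, 17, GRID_SIZE would truncate and the last element would never get a thread. The usual hedge, sketched below with illustrative names, is a ceil-division grid plus an in-kernel bounds guard:

#include <stdio.h>

#define N 17       /* deliberately not a multiple of the block size */
#define BLOCK 8

__global__ void helloBounded(int n) {
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < n)    /* guard the padded threads of the final block */
        printf("thread %3d of %d\n", id, n);
}

int main(void) {
    helloBounded<<<(N + BLOCK - 1) / BLOCK, BLOCK>>>(N);  /* ceil division */
    cudaDeviceSynchronize();
    return 0;
}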
fb8dd1efc64d32736714e429c8da2c2c33fe7452.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <iostream> using namespace std; #define CHECK(call) \ { \ const hipError_t error = call; \ if(error!=hipSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, hipGetErrorString(error)); \ } \ } \ void InitialInt(int *ip, int size) { for(int i=0; i<size; i++) { ip[i]=i; } } void printMatrix(int *C, const int nx, const int ny) { int *ic = C; cout << "Matrix: " << nx << ", " << ny << endl; for (int iy=0; iy<ny; iy++) { for (int ix=0; ix<nx; ix++) { printf("%3d", ic[ix]); } ic+=nx; cout << endl; } cout << endl; } __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x+blockIdx.x*blockDim.x; int iy = threadIdx.y+blockIdx.y*blockDim.y; unsigned int idx = iy*nx+ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char **argv) { cout << argv[0] << " Starting..." << endl; // get device information int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl; CHECK(hipSetDevice(dev)); // set matrix dimension int nx=8; int ny=6; int nxy=nx*ny; int nBytes = nxy*sizeof(float); // malloc host memory int *h_A; h_A = (int *)malloc(nBytes); // initialize host matrix with integer InitialInt(h_A, nxy); printMatrix(h_A, nx, ny); // malloc device memory int *d_MatA; hipMalloc((void **) &d_MatA, nBytes); // transfer data from host to device hipMemcpy(d_MatA, h_A, nBytes, hipMemcpyHostToDevice); // setup execution configuration dim3 block(4,2); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); // invoke the kernel hipLaunchKernelGGL(( printThreadIndex) , dim3(grid), dim3(block), 0, 0, d_MatA, nx, ny); // free host and device memory hipFree(d_MatA); free(h_A); // reset device hipDeviceReset(); return 0; }
fb8dd1efc64d32736714e429c8da2c2c33fe7452.cu
#include <cuda_runtime.h> #include <stdio.h> #include <iostream> using namespace std; #define CHECK(call) \ { \ const cudaError_t error = call; \ if(error!=cudaSuccess) { \ printf("Error: %s:%d, ", __FILE__, __LINE__); \ printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \ } \ } \ void InitialInt(int *ip, int size) { for(int i=0; i<size; i++) { ip[i]=i; } } void printMatrix(int *C, const int nx, const int ny) { int *ic = C; cout << "Matrix: " << nx << ", " << ny << endl; for (int iy=0; iy<ny; iy++) { for (int ix=0; ix<nx; ix++) { printf("%3d", ic[ix]); } ic+=nx; cout << endl; } cout << endl; } __global__ void printThreadIndex(int *A, const int nx, const int ny) { int ix = threadIdx.x+blockIdx.x*blockDim.x; int iy = threadIdx.y+blockIdx.y*blockDim.y; unsigned int idx = iy*nx+ix; printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) global index %2d ival %2d\n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]); } int main(int argc, char **argv) { cout << argv[0] << " Starting..." << endl; // get device information int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); std::cout << "Using Device " << dev << ": " << deviceProp.name << std::endl; CHECK(cudaSetDevice(dev)); // set matrix dimension int nx=8; int ny=6; int nxy=nx*ny; int nBytes = nxy*sizeof(float); // malloc host memory int *h_A; h_A = (int *)malloc(nBytes); // initialize host matrix with integer InitialInt(h_A, nxy); printMatrix(h_A, nx, ny); // malloc device memory int *d_MatA; cudaMalloc((void **) &d_MatA, nBytes); // transfer data from host to device cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice); // setup execution configuration dim3 block(4,2); dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y); // invoke the kernel printThreadIndex <<<grid, block>>> (d_MatA, nx, ny); // free host and device memory cudaFree(d_MatA); free(h_A); // reset device cudaDeviceReset(); return 0; }
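Two hardening notes on the pair above, offered as hedged suggestions rather than fixes this exact case needs: nBytes is computed with sizeof(float) for int data (the sizes coincide on CUDA platforms, but sizing from the element type is safer, e.g. int nBytes = nxy * sizeof(*h_A);), and A[idx] is read unguarded, which stays in bounds here only because 4x2 blocks tile the 8x6 matrix exactly. A guarded variant, illustrative only:

#include <cstdio>

__global__ void printThreadIndexSafe(int *A, int nx, int ny) {
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix < nx && iy < ny)   // guard for dimensions the grid overshoots
        printf("coordinate (%d,%d) global index %2d ival %2d\n",
               ix, iy, iy * nx + ix, A[iy * nx + ix]);
}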
bd60bfb71a78a603705b8fd68a0b630ea3c06e47.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Implements BGR 3 progressive planars frames batch resize #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "resize_convert.h" __global__ void resizeBGRplanarBatchKernel(hipTextureObject_t texSrc, float *pDst, int nDstPitch, int nDstHeight, int nSrcHeight, int batch, float scaleX, float scaleY, int cropX, int cropY, int cropW, int cropH) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= (int)(cropW/scaleX) || y >= (int)(cropH/scaleY)) return; int frameSize = nDstPitch*nDstHeight; float *p = NULL; for (int i = blockIdx.z; i < batch; i += gridDim.z) { #pragma unroll for (int channel=0; channel < 3; channel++){ p = pDst + i * 3 * frameSize + y * nDstPitch + x + channel * frameSize; *p = tex2D<float>(texSrc, x * scaleX + cropX, ((3 * i + channel) * nSrcHeight + y * scaleY + cropY)); } } } static void resizeBGRplanarBatchCore( float *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, float *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, int nBatchSize, hipStream_t stream, bool whSameResizeRatio, int cropX, int cropY, int cropW, int cropH) { hipTextureObject_t texSrc[2]; int nTiles = 1, h, iTile; h = nSrcHeight * 3 * nBatchSize; while ((h + nTiles - 1) / nTiles > 65536) nTiles++; if (nTiles > 2) return; int batchTile = nBatchSize / nTiles; int batchTileLast = nBatchSize - batchTile * (nTiles-1); for (iTile = 0; iTile < nTiles; ++iTile) { int bs = (iTile == nTiles - 1) ? batchTileLast : batchTile; float *dpSrcNew = dpSrc + iTile * (batchTile * 3 * nSrcHeight * nSrcPitch); hipResourceDesc resDesc = {}; resDesc.resType = hipResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrcNew; resDesc.res.pitch2D.desc = hipCreateChannelDesc<float>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = bs * 3 * nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch * sizeof(float); hipTextureDesc texDesc = {}; texDesc.filterMode = hipFilterModeLinear; texDesc.readMode = hipReadModeElementType; hipCreateTextureObject(&texSrc[iTile], &resDesc, &texDesc, NULL); float *dpDstNew = dpDst + iTile * (batchTile * 3 * nDstHeight * nDstPitch); if(cropW == 0 || cropH == 0) { cropX = 0; cropY = 0; cropW = nSrcWidth; cropH = nSrcHeight; } float scaleX = (cropW*1.0f / nDstWidth); float scaleY = (cropH*1.0f / nDstHeight); if(whSameResizeRatio == true) scaleX = scaleY = scaleX > scaleY ? scaleX : scaleY; dim3 block(32, 32, 1); unsigned int blockDimZ = bs; // Restricting blocks in Z-dim till 32 to not launch too many blocks blockDimZ = (blockDimZ > 32) ? 
32 : blockDimZ; dim3 grid((unsigned int)(cropW*1.0f/scaleX + block.x - 1) / block.x, (unsigned int)(cropH*1.0f/scaleY + block.y - 1) / block.y, blockDimZ); hipLaunchKernelGGL(( resizeBGRplanarBatchKernel), dim3(grid), dim3(block), 0, stream, texSrc[iTile], dpDstNew, nDstPitch, nDstHeight, nSrcHeight, bs, scaleX, scaleY, cropX, cropY, cropW, cropH); } for (iTile = 0; iTile < nTiles; ++iTile) hipDestroyTextureObject(texSrc[iTile]); } void resizeBGRplanarBatch( float *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, float *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, int nBatchSize, hipStream_t stream, int cropX, int cropY, int cropW, int cropH, bool whSameResizeRatio) { resizeBGRplanarBatchCore(dpSrc, nSrcPitch, nSrcWidth, nSrcHeight, dpDst, nDstPitch, nDstWidth, nDstHeight, nBatchSize, stream, whSameResizeRatio, cropX, cropY, cropW, cropH); }
bd60bfb71a78a603705b8fd68a0b630ea3c06e47.cu
/* * Copyright 1993-2019 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Implements BGR 3 progressive planars frames batch resize #include <cuda.h> #include <cuda_runtime.h> #include "resize_convert.h" __global__ void resizeBGRplanarBatchKernel(cudaTextureObject_t texSrc, float *pDst, int nDstPitch, int nDstHeight, int nSrcHeight, int batch, float scaleX, float scaleY, int cropX, int cropY, int cropW, int cropH) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if (x >= (int)(cropW/scaleX) || y >= (int)(cropH/scaleY)) return; int frameSize = nDstPitch*nDstHeight; float *p = NULL; for (int i = blockIdx.z; i < batch; i += gridDim.z) { #pragma unroll for (int channel=0; channel < 3; channel++){ p = pDst + i * 3 * frameSize + y * nDstPitch + x + channel * frameSize; *p = tex2D<float>(texSrc, x * scaleX + cropX, ((3 * i + channel) * nSrcHeight + y * scaleY + cropY)); } } } static void resizeBGRplanarBatchCore( float *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, float *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, int nBatchSize, cudaStream_t stream, bool whSameResizeRatio, int cropX, int cropY, int cropW, int cropH) { cudaTextureObject_t texSrc[2]; int nTiles = 1, h, iTile; h = nSrcHeight * 3 * nBatchSize; while ((h + nTiles - 1) / nTiles > 65536) nTiles++; if (nTiles > 2) return; int batchTile = nBatchSize / nTiles; int batchTileLast = nBatchSize - batchTile * (nTiles-1); for (iTile = 0; iTile < nTiles; ++iTile) { int bs = (iTile == nTiles - 1) ? batchTileLast : batchTile; float *dpSrcNew = dpSrc + iTile * (batchTile * 3 * nSrcHeight * nSrcPitch); cudaResourceDesc resDesc = {}; resDesc.resType = cudaResourceTypePitch2D; resDesc.res.pitch2D.devPtr = dpSrcNew; resDesc.res.pitch2D.desc = cudaCreateChannelDesc<float>(); resDesc.res.pitch2D.width = nSrcWidth; resDesc.res.pitch2D.height = bs * 3 * nSrcHeight; resDesc.res.pitch2D.pitchInBytes = nSrcPitch * sizeof(float); cudaTextureDesc texDesc = {}; texDesc.filterMode = cudaFilterModeLinear; texDesc.readMode = cudaReadModeElementType; cudaCreateTextureObject(&texSrc[iTile], &resDesc, &texDesc, NULL); float *dpDstNew = dpDst + iTile * (batchTile * 3 * nDstHeight * nDstPitch); if(cropW == 0 || cropH == 0) { cropX = 0; cropY = 0; cropW = nSrcWidth; cropH = nSrcHeight; } float scaleX = (cropW*1.0f / nDstWidth); float scaleY = (cropH*1.0f / nDstHeight); if(whSameResizeRatio == true) scaleX = scaleY = scaleX > scaleY ? scaleX : scaleY; dim3 block(32, 32, 1); unsigned int blockDimZ = bs; // Restricting blocks in Z-dim till 32 to not launch too many blocks blockDimZ = (blockDimZ > 32) ? 
32 : blockDimZ; dim3 grid((unsigned int)(cropW*1.0f/scaleX + block.x - 1) / block.x, (unsigned int)(cropH*1.0f/scaleY + block.y - 1) / block.y, blockDimZ); resizeBGRplanarBatchKernel<<<grid, block, 0, stream>>> (texSrc[iTile], dpDstNew, nDstPitch, nDstHeight, nSrcHeight, bs, scaleX, scaleY, cropX, cropY, cropW, cropH); } for (iTile = 0; iTile < nTiles; ++iTile) cudaDestroyTextureObject(texSrc[iTile]); } void resizeBGRplanarBatch( float *dpSrc, int nSrcPitch, int nSrcWidth, int nSrcHeight, float *dpDst, int nDstPitch, int nDstWidth, int nDstHeight, int nBatchSize, cudaStream_t stream, int cropX, int cropY, int cropW, int cropH, bool whSameResizeRatio) { resizeBGRplanarBatchCore(dpSrc, nSrcPitch, nSrcWidth, nSrcHeight, dpDst, nDstPitch, nDstWidth, nDstHeight, nBatchSize, stream, whSameResizeRatio, cropX, cropY, cropW, cropH); }
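The tiling logic in resizeBGRplanarBatchCore deserves a gloss: the whole batch is bound as one pitched 2D texture, stacking 3 planes per image, so the texture is nSrcHeight * 3 * nBatchSize rows tall, and the code splits the batch whenever that exceeds the 65536-row limit it assumes for 2D textures (bailing out beyond two tiles). A standalone sketch of the same arithmetic with illustrative sizes:

#include <cstdio>

// Mirrors the tile computation: split the batch so each tile's stacked
// height (3 planes per image) stays within the assumed 2D texture row limit.
int main(void) {
    const int nSrcHeight = 1080, nBatchSize = 32, kMaxRows = 65536;
    int h = nSrcHeight * 3 * nBatchSize;      // 103680 rows if stacked whole
    int nTiles = 1;
    while ((h + nTiles - 1) / nTiles > kMaxRows) nTiles++;
    int batchTile = nBatchSize / nTiles;
    int batchTileLast = nBatchSize - batchTile * (nTiles - 1);
    printf("tiles=%d first=%d last=%d\n", nTiles, batchTile, batchTileLast);
    // prints: tiles=2 first=16 last=16 for these numbers
    return 0;
}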
4c88cc245782a337fc2197a02da67175a42c52ba.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #include <cstdio> #include <helper_math.h> #define TPB 512 __global__ void centroidKernel(const uchar4 *d_img, int *d_centroidCol,int *d_centroidRow, int *d_pixelCount, int width, int height) { __shared__ uint4 s_img[TPB]; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int s_idx = threadIdx.x; const int row = idx / width; const int col = idx - row * width; //bounds check must come first: short-circuit keeps out-of-range threads from reading d_img if (idx < width*height && (d_img[idx].x < 255 || d_img[idx].y < 255 || d_img[idx].z < 255)){ s_img[s_idx].x = col; s_img[s_idx].y = row; s_img[s_idx].z = 1; } else { s_img[s_idx].x = 0; s_img[s_idx].y = 0; s_img[s_idx].z = 0; } __syncthreads(); //a more efficient way for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (s_idx < s) { s_img[s_idx] += s_img[s_idx + s]; //printf(s_img[s_idx].z); } __syncthreads(); } if (s_idx == 0) { atomicAdd(d_centroidCol, s_img[0].x); atomicAdd(d_centroidRow, s_img[0].y); atomicAdd(d_pixelCount, s_img[0].z); //printf(d_pixelCount); } } void centroidParallel(uchar4 *img, int width, int height) { uchar4 *d_img = 0; int *d_centroidCol = 0, *d_centroidRow = 0, *d_pixelCount = 0; int centroidCol = 0, centroidRow = 0, pixelCount = 0; //Allocate memory for device array and copy from host hipMalloc(&d_img, width*height * sizeof(uchar4)); hipMemcpy(d_img, img, width*height * sizeof(uchar4), hipMemcpyHostToDevice); //Allocate and set memory for three integers on the device hipMalloc(&d_centroidRow, sizeof(int)); hipMalloc(&d_centroidCol, sizeof(int)); hipMalloc(&d_pixelCount, sizeof(int)); hipMemset(d_centroidRow, 0, sizeof(int)); hipMemset(d_centroidCol, 0, sizeof(int)); hipMemset(d_pixelCount, 0, sizeof(int)); centroidKernel << <(width*height + TPB - 1)/ TPB, TPB >> > (d_img, d_centroidCol, d_centroidRow, d_pixelCount, width, height); //Copy results from device to host hipMemcpy(&centroidRow, d_centroidRow, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&centroidCol, d_centroidCol, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&pixelCount, d_pixelCount, sizeof(int), hipMemcpyDeviceToHost); //printf("ssssssss%d",pixelCount); centroidRow /= pixelCount; centroidCol /= pixelCount; printf("Centroid:{col = %d, row = %d} based on %d pixels\n", centroidCol, centroidRow, pixelCount); //Mark the centroid with red lines for (int col = 0; col < width; col++) { img[centroidRow*width + col].x = 255; img[centroidRow*width + col].y = 0; img[centroidRow*width + col].z = 0; } for (int row = 0; row < height; row++) { img[row*width + centroidCol].x = 255; img[row*width + centroidCol].y = 0; img[row*width + centroidCol].z = 0; } //Free the memory allocated hipFree(d_img); hipFree(d_centroidRow); hipFree(d_centroidCol); hipFree(d_pixelCount); }
4c88cc245782a337fc2197a02da67175a42c52ba.cu
#include "kernel.h" #include <cstdio> #include <helper_math.h> #define TPB 512 __global__ void centroidKernel(const uchar4 *d_img, int *d_centroidCol,int *d_centroidRow, int *d_pixelCount, int width, int height) { __shared__ uint4 s_img[TPB]; const int idx = threadIdx.x + blockDim.x * blockIdx.x; const int s_idx = threadIdx.x; const int row = idx / width; const int col = idx - row * width; if ((d_img[idx].x < 255 || d_img[idx].y < 255 || d_img[idx].z < 255) && (idx < width*height)){ s_img[s_idx].x = col; s_img[s_idx].y = row; s_img[s_idx].z = 1; } else { s_img[s_idx].x = 0; s_img[s_idx].y = 0; s_img[s_idx].z = 0; } __syncthreads(); //a more efficient way for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (s_idx < s) { s_img[s_idx] += s_img[s_idx + s]; //printf(s_img[s_idx].z); } __syncthreads(); } if (s_idx == 0) { atomicAdd(d_centroidCol, s_img[0].x); atomicAdd(d_centroidRow, s_img[0].y); atomicAdd(d_pixelCount, s_img[0].z); //printf(d_pixelCount); } } void centroidParallel(uchar4 *img, int width, int height) { uchar4 *d_img = 0; int *d_centroidCol = 0, *d_centroidRow = 0, *d_pixelCount = 0; int centroidCol = 0, centroidRow = 0, pixelCount = 0; //Allocate memory for device array and copy from host cudaMalloc(&d_img, width*height * sizeof(uchar4)); cudaMemcpy(d_img, img, width*height * sizeof(uchar4), cudaMemcpyHostToDevice); //Allocate and set memory for three integers on the device cudaMalloc(&d_centroidRow, sizeof(int)); cudaMalloc(&d_centroidCol, sizeof(int)); cudaMalloc(&d_pixelCount, sizeof(int)); cudaMemset(d_centroidRow, 0, sizeof(int)); cudaMemset(d_centroidCol, 0, sizeof(int)); cudaMemset(d_pixelCount, 0, sizeof(int)); centroidKernel << <(width*height + TPB - 1)/ TPB, TPB >> > (d_img, d_centroidCol, d_centroidRow, d_pixelCount, width, height); //Copy results from device to host cudaMemcpy(&centroidRow, d_centroidRow, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&centroidCol, d_centroidCol, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&pixelCount, d_pixelCount, sizeof(int), cudaMemcpyDeviceToHost); //printf("ssssssss%d",pixelCount); centroidRow /= pixelCount; centroidCol /= pixelCount; printf("Centroid:{col = %d, row = %d} based on %d pixels\n", centroidCol, centroidRow, pixelCount); //Mark the centroid with red lines for (int col = 0; col < width; col++) { img[centroidRow*width + col].x = 255; img[centroidRow*width + col].y = 0; img[centroidRow*width + col].z = 0; } for (int row = 0; row < height; row++) { img[row*width + centroidCol].x = 255; img[row*width + centroidCol].y = 0; img[row*width + centroidCol].z = 0; } //Free the memory allocated cudaFree(d_img); cudaFree(d_centroidRow); cudaFree(d_centroidCol); cudaFree(d_pixelCount); }
f93e9fd345295bdf1fcdc52b214bad1ea144708f.hip
// !!! This is a file automatically generated by hipify!!! #include "common_hip.cuh" #include <string.h> #include "local_config.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t twoLayerFF(Matrix &a2, Matrix &a3, const Matrix input, const Matrix W1, const Matrix W2, const Matrix b1, const Matrix b2); int main_2ff() { const char *base_dir = BASE_DIR; const char *test_name = "test_2ff"; char filename[256]; char *input_suffiex = ""; char *res_suffiex = "res"; //Matrix A = init_matrix_seq(10, 5); Matrix a2, a3, input, W1, W2, b1, b2; hipError_t cudaStatus; int res = 0; IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a2); // just for a2's size IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a3); // just for a3's size IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, input); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W1); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W2); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, b1); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, b2); // Add vectors in parallel. cudaStatus = twoLayerFF(a2, a3, input, W1, W2, b1, b2); if (cudaStatus != hipSuccess) { fprintf(stderr, "twoLayerFF failed!\n"); res = -1; goto real_exit; } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!\n"); res = -1; goto real_exit; } IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, a2); IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, a3); real_exit: free_matrix(a2); free_matrix(a3); free_matrix(input); free_matrix(W1); free_matrix(W2); free_matrix(b1); free_matrix(b2); return res; } // Helper function for using CUDA to add vectors in parallel. hipError_t twoLayerFF(Matrix &a2, Matrix &a3, const Matrix input, const Matrix W1, const Matrix W2, const Matrix b1, const Matrix b2) { int dInput = input.row; int nSamples = input.col; int dHidden = a2.row; int dOutput = dInput; int i, niter = 500; clock_t startTime, stopTime, elapsedTime; Matrix d_a2, d_a3, d_input, d_W1, d_W2, d_b1, d_b2; hipError_t cudaStatus; gHandler_t * handle = NULL; hipblasStatus_t status; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . 
CUDA_ZERO_MATRIX(d_a2, a2); CUDA_ZERO_MATRIX(d_a3, a3); CUDA_CLONE_MATRIX(d_input, input); CUDA_CLONE_MATRIX(d_W1, W1); CUDA_CLONE_MATRIX(d_W2, W2); CUDA_CLONE_MATRIX(d_b1, b1); CUDA_CLONE_MATRIX(d_b2, b2); handle = createGlobalHandle(nSamples, dInput, dHidden); fprintf(stderr, "gpu_twolayer_ff\n"); fflush(stderr); startTime = clock(); for (i = 0; i < niter; ++i) if (gpu_twolayer_ff(d_W1, d_b1, d_W2, d_b2, d_input, d_a2, d_a3, handle) == -1) { cudaStatus = hipErrorLaunchFailure; fprintf(stderr, "gpu_twolayer_ff error\n"); } else { CUDA_FETCH_MATRIX(a2, d_a2); CUDA_FETCH_MATRIX(a3, d_a3); } stopTime = clock(); elapsedTime = stopTime - startTime; printf("OWLQN Optimization takes: %5.2f s \n", ((float)elapsedTime/CLOCKS_PER_SEC)); printf("Number of Evaluation: %d\n", niter); Error: destroyGlobalHandle(&handle); hipFree(d_a2.elements); hipFree(d_a3.elements); hipFree(d_input.elements); hipFree(d_W1.elements); hipFree(d_W2.elements); hipFree(d_b1.elements); hipFree(d_b2.elements); return cudaStatus; }
f93e9fd345295bdf1fcdc52b214bad1ea144708f.cu
#include "common.cuh" #include <string.h> #include "local_config.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t twoLayerFF(Matrix &a2, Matrix &a3, const Matrix input, const Matrix W1, const Matrix W2, const Matrix b1, const Matrix b2); int main_2ff() { const char *base_dir = BASE_DIR; const char *test_name = "test_2ff"; char filename[256]; char *input_suffiex = ""; char *res_suffiex = "res"; //Matrix A = init_matrix_seq(10, 5); Matrix a2, a3, input, W1, W2, b1, b2; cudaError_t cudaStatus; int res = 0; IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a2); // just for a2's size IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, a3); // just for a3's size IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, input); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W1); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, W2); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, b1); IO_MATRIX_WRAPPER(filename, base_dir, test_name, input_suffiex, read_matrix, b2); // Add vectors in parallel. cudaStatus = twoLayerFF(a2, a3, input, W1, W2, b1, b2); if (cudaStatus != cudaSuccess) { fprintf(stderr, "twoLayerFF failed!\n"); res = -1; goto real_exit; } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!\n"); res = -1; goto real_exit; } IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, a2); IO_MATRIX_WRAPPER(filename, base_dir, test_name, res_suffiex, write_matrix, a3); real_exit: free_matrix(a2); free_matrix(a3); free_matrix(input); free_matrix(W1); free_matrix(W2); free_matrix(b1); free_matrix(b2); return res; } // Helper function for using CUDA to add vectors in parallel. cudaError_t twoLayerFF(Matrix &a2, Matrix &a3, const Matrix input, const Matrix W1, const Matrix W2, const Matrix b1, const Matrix b2) { int dInput = input.row; int nSamples = input.col; int dHidden = a2.row; int dOutput = dInput; int i, niter = 500; clock_t startTime, stopTime, elapsedTime; Matrix d_a2, d_a3, d_input, d_W1, d_W2, d_b1, d_b2; cudaError_t cudaStatus; gHandler_t * handle = NULL; cublasStatus_t status; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . 
CUDA_ZERO_MATRIX(d_a2, a2); CUDA_ZERO_MATRIX(d_a3, a3); CUDA_CLONE_MATRIX(d_input, input); CUDA_CLONE_MATRIX(d_W1, W1); CUDA_CLONE_MATRIX(d_W2, W2); CUDA_CLONE_MATRIX(d_b1, b1); CUDA_CLONE_MATRIX(d_b2, b2); handle = createGlobalHandle(nSamples, dInput, dHidden); fprintf(stderr, "gpu_twolayer_ff\n"); fflush(stderr); startTime = clock(); for (i = 0; i < niter; ++i) if (gpu_twolayer_ff(d_W1, d_b1, d_W2, d_b2, d_input, d_a2, d_a3, handle) == -1) { cudaStatus = cudaErrorLaunchFailure; fprintf(stderr, "gpu_twolayer_ff error\n"); } else { CUDA_FETCH_MATRIX(a2, d_a2); CUDA_FETCH_MATRIX(a3, d_a3); } stopTime = clock(); elapsedTime = stopTime - startTime; printf("OWLQN Optimization takes: %5.2f s \n", ((float)elapsedTime/CLOCKS_PER_SEC)); printf("Number of Evaluation: %d\n", niter); Error: destroyGlobalHandle(&handle); cudaFree(d_a2.elements); cudaFree(d_a3.elements); cudaFree(d_input.elements); cudaFree(d_W1.elements); cudaFree(d_W2.elements); cudaFree(d_b1.elements); cudaFree(d_b2.elements); return cudaStatus; }
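A timing caveat for twoLayerFF: clock() measures host time and kernel launches are asynchronous, so the printed figure is only as good as the implicit synchronization from the per-iteration CUDA_FETCH_MATRIX device-to-host copies (the "OWLQN Optimization" label also looks inherited from other code; the loop is timing feed-forward passes). For timing device work directly, CUDA events are the standard tool; a minimal fragment, illustrative only:

cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);

cudaEventRecord(start);
/* ... kernel launches / cuBLAS calls to be timed ... */
cudaEventRecord(stop);
cudaEventSynchronize(stop);              /* wait until the GPU passes 'stop' */

float ms = 0.0f;
cudaEventElapsedTime(&ms, start, stop);  /* elapsed milliseconds on the GPU */
printf("GPU time: %.3f ms\n", ms);

cudaEventDestroy(start);
cudaEventDestroy(stop);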
039c2e049fefeb773d900738ba71317c8201a653.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void scanBlks(unsigned int *in, unsigned int *out, unsigned int n, unsigned int *blkSums) { extern __shared__ int blkData[]; int i1 = blockIdx.x * 2 * blockDim.x + threadIdx.x; int i2 = i1 + blockDim.x; if (i1 < n) blkData[threadIdx.x] = in[i1]; if (i2 < n) blkData[threadIdx.x + blockDim.x] = in[i2]; __syncthreads(); for (int stride = 1; stride < 2 * blockDim.x; stride *= 2) { int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1; if (blkDataIdx < 2 * blockDim.x) blkData[blkDataIdx] += blkData[blkDataIdx - stride]; __syncthreads(); } for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1 + stride; if (blkDataIdx < 2 * blockDim.x) blkData[blkDataIdx] += blkData[blkDataIdx - stride]; __syncthreads(); } if (i1 < n) out[i1] = blkData[threadIdx.x]; if (i2 < n) out[i2] = blkData[threadIdx.x + blockDim.x]; if (blkSums != NULL && threadIdx.x == 0) blkSums[blockIdx.x] = blkData[2 * blockDim.x - 1]; }
039c2e049fefeb773d900738ba71317c8201a653.cu
#include "includes.h" __global__ void scanBlks(unsigned int *in, unsigned int *out, unsigned int n, unsigned int *blkSums) { extern __shared__ int blkData[]; int i1 = blockIdx.x * 2 * blockDim.x + threadIdx.x; int i2 = i1 + blockDim.x; if (i1 < n) blkData[threadIdx.x] = in[i1]; if (i2 < n) blkData[threadIdx.x + blockDim.x] = in[i2]; __syncthreads(); for (int stride = 1; stride < 2 * blockDim.x; stride *= 2) { int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1; if (blkDataIdx < 2 * blockDim.x) blkData[blkDataIdx] += blkData[blkDataIdx - stride]; __syncthreads(); } for (int stride = blockDim.x / 2; stride > 0; stride /= 2) { int blkDataIdx = (threadIdx.x + 1) * 2 * stride - 1 + stride; if (blkDataIdx < 2 * blockDim.x) blkData[blkDataIdx] += blkData[blkDataIdx - stride]; __syncthreads(); } if (i1 < n) out[i1] = blkData[threadIdx.x]; if (i2 < n) out[i2] = blkData[threadIdx.x + blockDim.x]; if (blkSums != NULL && threadIdx.x == 0) blkSums[blockIdx.x] = blkData[2 * blockDim.x - 1]; }
230f68b997c8594111ead65f5f3f675d56bb5f97.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * See LICENSE for clarification regarding multiple authors */ #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <limits> #include <numeric> #include <set> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/tensor.h" #include "k2/csrc/test_utils.h" namespace k2 { class RaggedShapeOpsSuiteTest : public ::testing::Test { protected: RaggedShapeOpsSuiteTest() { ContextPtr context = GetCpuContext(); const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; std::vector<RaggedShapeLayer> axes; axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); simple_shape_ = RaggedShape(axes, true); // random_shape_ is on CPU random_shape_ = RandomRaggedShape(true, // set_row_ids 3, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements } RaggedShape simple_shape_; RaggedShape random_shape_; }; TEST(RaggedShapeTest, TestConstructFromString) { RaggedShape rs(" [ [ x x ] [x] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); RaggedShape rs2(" [ [ [ x x ] ] [[x]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; K2_CHECK_EQ(RaggedShape("[ ]").Dim0(), 0); ASSERT_DEATH(RaggedShape(" [ [ x x ] [x] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x x ] [[x]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x [] x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x | x ] "), ""); for (int i = 0; i < 5; i++) { RaggedShape rs = RandomRaggedShape(true, 2, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements std::ostringstream os; os << rs; RaggedShape rs2; std::istringstream is(os.str()); K2_LOG(INFO) << "Shape is: " << os.str(); is >> rs2; K2_CHECK(is.good()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. 
K2_CHECK(Equal(rs, rs2) || rs.NumElements() == 0); } } TEST(RaggedTest, TestRaggedFromString) { Ragged<int32_t> rs(" [ [ 1 2 ] [3] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); K2_CHECK_EQ(rs.values.Back(), 3); K2_CHECK_EQ(rs.values[0], 1); Ragged<int32_t> rs2(" [ [ [ 0 5 ] ] [[10]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [0] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [[0]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 [] 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 | 0 ] "), ""); for (int32_t i = 0; i < 5; i++) { Ragged<int32_t> r = RandomRagged<int32_t>(); std::ostringstream os; os << r; Ragged<int32_t> r2(os.str()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. K2_CHECK(Equal(r, r2) || r.values.Dim() == 0); } } template <typename T> void TestMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits = {0}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = 0; std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> max_values(context, num_rows); // just run to check if there's any error MaxPerSublist(ragged, 1, &max_values); EXPECT_EQ(max_values.Dim(), 0); } { const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 2, 8, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> max_values(context, num_rows); T default_value = 2; MaxPerSublist(ragged, default_value, &max_values); // copy memory from GPU/CPU to CPU std::vector<T> cpu_data(max_values.Dim()); max_values.Context()->CopyDataTo( max_values.Dim() * max_values.ElementSize(), max_values.Data(), cpu, cpu_data.data()); std::vector<T> expected_data = {3, default_value, 8, default_value}; EXPECT_EQ(cpu_data, expected_data); } { // test with random large size const int32_t min_num_elements = 2000; // not random shape is on CPU RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_elements, 5000); ASSERT_EQ(shape.NumAxes(), 2); RaggedShape gpu_shape; if (context->GetDeviceType() == kCuda) { // copy shape to GPU const Array1<T> &row_splits = shape.RowSplits(1); RaggedShapeLayer shape_dim; shape_dim.row_splits = row_splits.To(GetCudaContext()); shape_dim.cached_tot_size = shape.NumElements(); std::vector<RaggedShapeLayer> axes = {shape_dim}; gpu_shape = RaggedShape(axes, true); } int32_t num_elems = shape.NumElements(); std::vector<T> data(num_elems); for (int32_t i = 0; i != 10; ++i) { std::iota(data.begin(), data.end(), 0); // randomly set data[pos] = num_elems which is // greater than any element in data int32_t pos = RandInt(0, num_elems - 1); data[pos] = num_elems; // find the corresponding row int32_t num_rows = shape.Dim0(); const int32_t *row_splits_data 
= shape.RowSplits(1).Data(); int32_t row = 0; for (int32_t i = 0; i < num_rows; ++i) { if (pos >= row_splits_data[i] && pos < row_splits_data[i + 1]) { row = i; break; } } Array1<T> values(context, data); Ragged<T> ragged(context->GetDeviceType() == kCuda ? gpu_shape : shape, values); Array1<T> max_values(context, num_rows); T default_value = 0; MaxPerSublist(ragged, default_value, &max_values); EXPECT_EQ(max_values[row], num_elems); } } } } TEST(RaggedShapeOpsTest, MaxPerSubListTest) { TestMaxPerSubListTest<int32_t>(); } template <typename T> void TestArgMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits_vec = {0}; Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<int32_t> argmax_values(context, num_rows); // just run to check if there's any error ArgMaxPerSublist(ragged, 1, &argmax_values); EXPECT_EQ(argmax_values.Dim(), 0); } { const std::vector<int32_t> row_splits_vec = {0, 3, 3, 6, 7}; Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 2, 1, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> argmax_values(context, num_rows); T default_value = 2; ArgMaxPerSublist(ragged, default_value, &argmax_values); std::vector<T> expected_data = {2, -1, 3, -1}; CheckArrayData(argmax_values, expected_data); } { // test with random large size ContextPtr cpu = GetCpuContext(); for (int32_t i = 0; i != 10; ++i) { Ragged<int32_t> ragged = RandomRagged<int32_t>(0, 1000, 2, 4, 0, 5000).To(context); int32_t last_axis = ragged.NumAxes() - 1; Array1<int32_t> argmax_values(context, ragged.RowSplits(last_axis).Dim() - 1); int32_t default_value = 2; ArgMaxPerSublist(ragged, default_value, &argmax_values); ragged = ragged.To(cpu); argmax_values = argmax_values.To(cpu); Array1<int32_t> row_splits = ragged.RowSplits(last_axis); int32_t rows = row_splits.Dim() - 1; for (int32_t row = 0; row < rows; row++) { int32_t begin = row_splits[row], end = row_splits[row + 1]; int32_t max_val = 2, best_pos = -1; for (int32_t pos = begin; pos < end; pos++) { if (ragged.values[pos] >= max_val) { max_val = ragged.values[pos]; best_pos = pos; } } EXPECT_EQ(argmax_values[row], best_pos); } } } } } TEST(RaggedShapeOpsTest, ArgMaxPerSubListTest) { TestArgMaxPerSubListTest<int32_t>(); } template <typename T> void TestMinPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case std::vector<int32_t> row_splits_vec = {0}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> min_values(context, num_rows); // just run to check if there's any error MinPerSublist(ragged, 1, &min_values); EXPECT_EQ(min_values.Dim(), 0); } { std::vector<int32_t> row_splits_vec = {0, 2, 2, 5, 6}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 8, 4, -1}; 
Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> min_values(context, num_rows); T default_value = 2; MinPerSublist(ragged, default_value, &min_values); // copy memory from GPU/CPU to CPU min_values = min_values.To(cpu); std::vector<T> cpu_data(min_values.Data(), min_values.Data() + min_values.Dim()); std::vector<T> expected_data = {1, default_value, default_value, -1}; EXPECT_EQ(cpu_data, expected_data); } // May add tests for random large size? (but maybe it's fine to not add as // we have tested large cases in MaxPerSubList) } } TEST(RaggedShapeOpsTest, MinPerSubListTest) { TestMinPerSubListTest<int32_t>(); } template <typename T> void TestAndOrPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // And const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 6, 11, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = -1; AndPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {1, -1, 2, 0}; EXPECT_EQ(cpu_data, expected_data); } { // Or const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 4, 6, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = 0; OrPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {3, 0, 7, 0}; EXPECT_EQ(cpu_data, expected_data); } } } TEST(RaggedShapeOpsTest, AndOrPerSubListTest) { TestAndOrPerSubListTest<int32_t>(); } void TestUnsqueeze(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); src_shape.Populate(); // set row_ids { // axis = 0. 
RaggedShape shape = Unsqueeze(src_shape, 0); int32_t dim0 = src_shape.Dim0(); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, dim0}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data(dim0, 0); CheckArrayData(row_ids0, data); } { for (size_t i = 0; i != src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = Unsqueeze(src_shape, axis); int32_t tot_size = shape.TotSize(axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { for (int32_t i = 0; i < axis; ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i].row_ids); } } { const Array1<int32_t> &row_splits = dest_axes[axis].row_splits; std::vector<int32_t> data(tot_size + 1); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_splits, data); } { const Array1<int32_t> &row_ids = dest_axes[axis].row_ids; std::vector<int32_t> data(tot_size); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_ids, data); } { for (std::size_t i = axis; i < src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueeze) { TestUnsqueeze(simple_shape_); TestUnsqueeze(random_shape_); } TEST(RaggedShapeOpsTest, TestUnsqueezeParallel) { for (int32_t i = 0; i < 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_shapes = RandInt(0, 10); std::vector<RaggedShape *> orig_shapes; for (int32_t i = 0; i < num_shapes; i++) orig_shapes.push_back( new RaggedShape(RandomRaggedShape(false, 2, 5, 0, 1000).To(c))); int32_t axis = 0; // only one supported for now. std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_shapes, orig_shapes.data(), axis); for (int32_t i = 0; i < num_shapes; i++) { ASSERT_EQ(unsqueezed[i].Validate(), true); RaggedShape temp = RemoveAxis(unsqueezed[i], axis); ASSERT_EQ(Equal(temp, *(orig_shapes[i])), true); delete orig_shapes[i]; } } } void TestRemoveAxis(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); ASSERT_EQ(src_shape.NumAxes(), 4); { // axis = 0. 
int32_t axis = 0; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, 3, 7, 10}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data = {0, 0, 0, 1, 1, 1, 1, 2, 2, 2}; CheckArrayData(row_ids0, data); } { for (std::size_t i = 1; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 3 int32_t axis = 3; // the last axis RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxis) { TestRemoveAxis(simple_shape_); } TEST(RaggedShapeOpsTest, TestGetOffsets) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { for (int32_t i = 0; i != 2; ++i) { int32_t num_shape = RandInt(10, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } RaggedShape **shapes_ptr = shapes.data(); Array2<int32_t> offsets = GetOffsets(num_shape, shapes_ptr); ASSERT_EQ(offsets.Dim0(), num_axes + 1); ASSERT_EQ(offsets.Dim1(), num_shape + 1); auto acc = offsets.Accessor(); for (int32_t axis = 0; axis <= num_axes; ++axis) { int32_t sum = 0; for (int32_t j = 0; j <= num_shape; ++j) { EXPECT_EQ(acc(axis, j), sum); if (j < num_shape) { sum += (axis == 0 ? 1 : shape_vec[j].TotSize(axis - 1)); } } } } } } // returns a random ragged shape where the dims on axis 1 are all the same // (so: can be transposed). 
RaggedShape RandomRaggedShapeToTranspose(ContextPtr c) { ContextPtr c_cpu = GetCpuContext(); RaggedShape random = RandomRaggedShape(false, 2, 4, 0, 5000).To(c); int32_t input_dim0 = random.Dim0(), divisor = 1; for (int32_t i = 1; i * i <= input_dim0; i++) { if (input_dim0 % i == 0 && i > divisor) divisor = i; } int32_t output_dim0 = divisor, output_dim1 = input_dim0 / divisor; Array1<int32_t> row_splits = Range<int32_t>(c, output_dim0 + 1, 0, output_dim1); int32_t cached_tot_size = input_dim0; RaggedShape top_level_shape = RaggedShape2(&row_splits, nullptr, cached_tot_size); return ComposeRaggedShapes(top_level_shape, random); } TEST(RaggedShapeOpsTest, TestTranspose) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); RaggedShape shape = Transpose(src_shape); EXPECT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); RaggedShape transposed = Transpose(to_transpose); if (context->GetDeviceType() != kCpu) { to_transpose = to_transpose.To(cpu); transposed = transposed.To(cpu); } for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t i = transposed[index]; // Just make sure this doesn't crash, // don't need the value. std::swap(index[0], index[1]); i = to_transpose[index]; // don't need the value, just need to make // sure it's an allowable index. ++i; // this line just suppresses the warning `variable i set but not // used` } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); std::swap(index[0], index[1]); int32_t i = transposed[index]; // don't need the value, just need to // make sure it's an allowable index. 
} } } } } template <typename T> void TestTransposeRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); std::vector<T> values = {0, 1, 2, 3, 4, 5, 8, 7, 6, 9, 10, 15}; ASSERT_EQ(values.size(), src_shape.NumElements()); Array1<T> values_array(context, values); Ragged<T> ragged(src_shape, values_array); Ragged<T> ans = Transpose(ragged); RaggedShape shape = ans.shape; // Check shape ASSERT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); // Check values CheckArrayData(ans.values, {0, 1, 2, 4, 5, 8, 6, 9, 3, 7, 10, 15}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); int32_t num_elems = to_transpose.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); Ragged<T> src(to_transpose, src_values); Ragged<T> ans = Transpose(src); if (context->GetDeviceType() == kCuda) { src = src.To(cpu); ans = ans.To(cpu); to_transpose = to_transpose.To(cpu); } RaggedShape transposed = ans.shape; for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = ans[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, src[index]); } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = src[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, ans[index]); } } } } } TEST(RaggedTest, TestTransposeRagged) { TestTransposeRagged<int32_t>(); TestTransposeRagged<double>(); } void TestRaggedShape2(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 2); Array1<int32_t> row_splits = src_shape.RowSplits(1); Array1<int32_t> row_ids = src_shape.RowIds(1); int32_t cached_tot_size = src_shape.TotSize(1); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape2(&row_splits, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // both row_splits and row_ids are non-null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null RaggedShape result = RaggedShape2(&row_splits, nullptr, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // 
row_ids is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids if (row_splits.Dim() == (row_ids.Dim() == 0 ? 1 : row_ids.Back() + 2)) { { // row_splits is null RaggedShape result = RaggedShape2(nullptr, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_splits is null, cached_tot_size = -1 RaggedShape result = RaggedShape2(nullptr, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2) { TestRaggedShape2(simple_shape_); TestRaggedShape2(random_shape_); } void TestRaggedShape3(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); int32_t cached_tot_size1 = src_shape.TotSize(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); int32_t cached_tot_size2 = src_shape.TotSize(2); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape3(&row_splits1, &row_ids1, cached_tot_size1, &row_splits2, &row_ids2, cached_tot_size2); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } { // row_ids is non-null, cached_tot_size = -1 RaggedShape result = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // note if row_splits == null, then we suppose there's no empty rows after // the last row-id in row_ids bool valid1 = (row_splits1.Dim() == (row_ids1.Dim() == 0 ? 1 : row_ids1.Back() + 2)); bool valid2 = (row_splits2.Dim() == (row_ids2.Dim() == 0 ? 
1 : row_ids2.Back() + 2)); if (valid1 && valid2) { RaggedShape result = RaggedShape3(nullptr, &row_ids1, -1, nullptr, &row_ids2, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // TODO(haowen): add more cases for other branches } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3) { TestRaggedShape3(simple_shape_); TestRaggedShape3(random_shape_); } void TestComposeShape(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); RaggedShape shape1 = RaggedShape2(&row_splits1, nullptr, -1); RaggedShape shape2 = RaggedShape2(&row_splits2, nullptr, -1); RaggedShape result = ComposeRaggedShapes(shape1, shape2); ASSERT_EQ(result.NumAxes(), 3); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); } } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShape) { TestComposeShape(simple_shape_); TestComposeShape(random_shape_); } void TestShapeFromTotSize(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 2); int32_t num_axes = src_shape.NumAxes(); std::vector<int32_t> tot_sizes(num_axes); for (int32_t i = 0; i != num_axes; ++i) { tot_sizes[i] = src_shape.TotSize(i); } RaggedShape result = RaggedShapeFromTotSizes(context, num_axes, tot_sizes.data()); ASSERT_EQ(result.NumAxes(), num_axes); for (int32_t i = 0; i < num_axes; ++i) { EXPECT_EQ(result.TotSize(i), src_shape.TotSize(i)); if (i > 0) { EXPECT_EQ(result.RowSplits(i).Dim(), src_shape.RowSplits(i).Dim()); EXPECT_EQ(result.RowIds(i).Dim(), src_shape.RowIds(i).Dim()); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSize) { TestShapeFromTotSize(simple_shape_); TestShapeFromTotSize(random_shape_); } template <typename T> void TestRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // constructed with row_splits and row_ids // RaggedTensor4 t = [ // [ [[ 1, 2], [4]], [[3, 0]] ], // [ [[7, 8, 9]], [[6], [3, 5, 7]], [[2]] ], // [ [[3, 4], [], [8]] ] // ] const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; const std::vector<T> values_vec = {1, 2, 4, 3, 0, 7, 8, 9, 6, 3, 5, 7, 2, 3, 4, 8}; std::vector<RaggedShapeLayer> axes; axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back( 
RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); RaggedShape shape(axes, true); Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); // test Index(axis, i) { // values: [[[ 1, 2], [4]], [[3, 0]]] Ragged<T> sub_ragged = ragged.Index(0, 0); RaggedShape &sub_shape = sub_ragged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 2, 3}, {0, 2, 3, 5}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_ragged.values; const std::vector<T> sub_values_vec = {1, 2, 4, 3, 0}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[7, 8, 9]], [[6], [3, 5, 7]], [[2]]] Ragged<T> sub_ragged = ragged.Index(0, 1); RaggedShape &sub_shape = sub_ragged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 1, 3, 4}, {0, 3, 4, 7, 8}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_ragged.values; const std::vector<T> sub_values_vec = {7, 8, 9, 6, 3, 5, 7, 2}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[3, 4], [], [8]]] Ragged<T> sub_ragged = ragged.Index(0, 2); RaggedShape &sub_shape = sub_ragged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 3}, {0, 2, 2, 3}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_ragged.values; const std::vector<T> sub_values_vec = {3, 4, 8}; CheckArrayData<T>(sub_values, sub_values_vec); } // test operator[](const std::vector<int32_t> &indexes) if (context->GetDeviceType() == kCpu) { { std::vector<int32_t> indexes = {0, 0, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 0); EXPECT_EQ(ragged[indexes], 1); } { std::vector<int32_t> indexes = {0, 1, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 3); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {1, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 6); EXPECT_EQ(ragged[indexes], 8); } { std::vector<int32_t> indexes = {1, 1, 1, 0}; EXPECT_EQ(ragged.shape[indexes], 9); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {2, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 14); EXPECT_EQ(ragged[indexes], 4); } { std::vector<int32_t> indexes = {2, 0, 2, 0}; EXPECT_EQ(ragged.shape[indexes], 15); EXPECT_EQ(ragged[indexes], 8); } } const std::vector<std::vector<int32_t>> row_splits_vec = { row_splits1, row_splits2, row_splits3}; // test To(ctx) { // to GPU Ragged<T> other = ragged.To(GetCudaContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } { // to CPU Ragged<T> other = ragged.To(GetCpuContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } } } } template <typename T, typename OP = LessThan<T>> static void CpuSortSublists(const Array1<int32_t> &row_splits, Array1<T> *src) { K2_CHECK(src->Context()->GetDeviceType() == kCpu); T *p = src->Data(); OP comp = OP(); for (int32_t i = 0; i < row_splits.Dim() - 1; ++i) { int32_t cur = row_splits[i]; int32_t next = row_splits[i + 1]; std::sort(p + cur, p + next, comp); } } template <typename T, typename OP = LessThan<T>> static void TestSortSublists() { auto cpu_context = GetCpuContext(); auto cuda_context = GetCudaContext(); RaggedShape shape =
RandomRaggedShape(false, // set_row_ids 2, // min_num_axes 4, // max_num_axes 1, // min_num_elements 2000); // max_num_elements Array1<T> values = RandUniformArray1<T>(shape.Context(), shape.NumElements(), -2000, 2000); Ragged<T> ragged(shape, values); ragged = ragged.To(cuda_context); values = values.To(cpu_context); // to be sorted by cpu Array1<T> unsorted = values.Clone(); Array1<int32_t> order(ragged.Context(), ragged.values.Dim()); SortSublists<T, OP>(&ragged, &order); Array1<int32_t> &segment = ragged.shape.RowSplits(ragged.NumAxes() - 1); CpuSortSublists<T, OP>(segment, &values); int32_t n = order.Dim(); for (int i = 0; i != n; ++i) { EXPECT_EQ(values[i], ragged.values[i]); EXPECT_EQ(ragged.values[i], unsorted[order[i]]); } } TEST(RaggedTest, Ragged) { TestRagged<int32_t>(); TestRagged<double>(); TestSortSublists<int32_t>(); TestSortSublists<double>(); } TEST(RaggedShapeOpsTest, TestAppend) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes[1] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[1] = &shapes[1]; } { // axis == 1 RaggedShape result = Append(1, 2, shapes_ptr.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } } { // axis == 0 RaggedShape result = Append(0, 2, shapes_ptr.data()); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto i = 0; i < 2; ++i) { std::vector<const Array1<int32_t> *> splits_ptr = { &row_splits_vec[i][0], &row_splits_vec[i][1]}; Array1<int32_t> curr_row_splits = SpliceRowSplits(2, splits_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); 
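// RowSplitsToRowIds expands row_splits into one row index per element,
// e.g. row_splits {0, 2, 5, 6} -> row_ids {0, 0, 1, 1, 1, 2}, matching the
// row_splits1/row_ids1 pairs used throughout these tests.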
RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), result_splits[i]); CheckArrayData(result.RowIds(i + 1), result_ids[i]); } } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } // only test case axis == 0, test axis==1 with simple case is good // enough as it just calls Stack RaggedShape result = Append(0, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (int32_t axis = 1; axis < num_axes; ++axis) { std::vector<Array1<int32_t>> splits_vec(num_shape); std::vector<const Array1<int32_t> *> splits_vec_ptr(num_shape); for (int32_t n = 0; n != num_shape; ++n) { splits_vec[n] = shape_vec[n].RowSplits(axis); splits_vec_ptr[n] = &splits_vec[n]; } Array1<int32_t> curr_row_splits = SpliceRowSplits(num_shape, splits_vec_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); RowSplitsToRowIds(curr_row_splits, &curr_row_ids); result_ids.push_back(curr_row_ids); } // check data for (int32_t axis = 1; axis < num_axes; ++axis) { CheckArrayData(result.RowSplits(axis), result_splits[axis - 1]); CheckArrayData(result.RowIds(axis), result_ids[axis - 1]); } } } } } template <typename T> void TestAppendRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // TODO(haowen): remove duplicate code in TestAppend above. // test with simple case could be good enough, as we have tested // Append(RaggedShape&) already. 
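// For reference: Append on axis 0 concatenates the sources' top-level rows
// (so the value arrays are laid end to end), while Append on axis 1 splices
// the sources together row by row; the expected_data vectors in the checks
// below show both layouts.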
std::vector<Ragged<T>> ragged_vec(2); std::vector<Ragged<T> *> ragged(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<T> values_vec = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[0] = Ragged<T>(shape, values); ragged[0] = &ragged_vec[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; const std::vector<T> values_vec = {20, 21, 23, 28, 30, 32, 35}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[1] = Ragged<T>(shape, values); ragged[1] = &ragged_vec[1]; } { // axis == 0 Ragged<T> result = Append(0, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 1, 1, 1, 2, 3, 4, 4, 5}, {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18, 20, 21, 23, 28, 30, 32, 35}; CheckArrayData(result.values, expected_data); } { // axis == 1 Ragged<T> result = Append(1, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 20, 21, 23, 7, 9, 10, 12, 28, 30, 14, 15, 18, 32, 35}; CheckArrayData(result.values, expected_data); } } } TEST(RaggedTest, TestAppendRagged) { TestAppendRagged<int32_t>(); TestAppendRagged<double>(); } void CheckResultOfIndex(const ContextPtr &context, RaggedShape shape, Array1<int32_t> new2old, RaggedShape result) { K2_CHECK(context->IsCompatible(*shape.Context())); ContextPtr cpu = GetCpuContext(); // will use to copy data int32_t num_axes = shape.NumAxes(); int32_t src_dim0 = shape.Dim0(), result_dim0 = result.Dim0(); EXPECT_EQ(result_dim0, new2old.Dim()); result.Check(); for (int32_t i = 0; i < result_dim0; i++) { RaggedShape result_part = Arange(result, 0, i, i + 1); if (new2old[i] == -1) { K2_CHECK_EQ(0, result_part.TotSize(1)); } else { RaggedShape src_part = Arange(shape, 0, new2old[i], new2old[i] + 1); K2_CHECK_EQ(true, Equal(src_part, result_part)); } } } 
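// A minimal added sketch (not part of the original suite) of the -1
// convention that CheckResultOfIndex() above relies on: an entry of -1 in
// new2old yields an empty row in the result. It only uses APIs already
// exercised elsewhere in this file.
TEST(RaggedShapeOpsTest, IndexAxis0MinusOneSketch) {
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    // src = [ [x x] [x] ]; gather row 1, then an empty row, then row 0.
    RaggedShape shape = RaggedShape("[ [ x x ] [ x ] ]").To(context);
    Array1<int32_t> new2old(context, std::vector<int32_t>{1, -1, 0});
    Array1<int32_t> value_indexes;
    RaggedShape result = Index(shape, 0, new2old, &value_indexes);
    EXPECT_EQ(result.Dim0(), 3);
    CheckArrayData(result.RowSplits(1), std::vector<int32_t>{0, 1, 1, 3});
    // Element e of the result comes from src element value_indexes[e].
    CheckArrayData(value_indexes, std::vector<int32_t>{2, 0, 1});
  }
}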
TEST(RaggedShapeOpsTest, TestIndex) { for (int i = 0; i < 5; i++) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); std::vector<int32_t> new2old_vec = {2, 1}; Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes_out; RaggedShape result = Index(shape, 0, new2old, &value_indexes_out); // fsa 2, state_idx01 {5}, arc_idx012 {7, 8, 9} // fsa 1, state_idx01 {2, 3, 4}, arc_idx012 {{3},{4, 5}, {6}} CheckArrayData(value_indexes_out, std::vector<int32_t>{7, 8, 9, 3, 4, 5, 6}); CheckResultOfIndex(context, shape, new2old, result); } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(), result_dim0 = RandInt(0, 10); if (dim0 == 0) result_dim0 = 0; std::vector<int32_t> new2old_vec(result_dim0); for (int i = 0; i < result_dim0; i++) new2old_vec[i] = RandInt(-1, dim0 - 1); Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes; RaggedShape result = Index(shape, 0, new2old, &value_indexes); CheckResultOfIndex(context, shape, new2old, result); K2_LOG(INFO) << "Value_indexes = " << value_indexes; } } } } } TEST(RaggedShapeOpsTest, TestIndexAxis1) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Ragged<int32_t> input = Ragged<int32_t>(" [ [ 1 2 ] [ 3 4 5 ] [ 6 7 ] [ ] ]").To(context); // NOLINT Array1<int32_t> indexes = Array1<int32_t>(" [ 1 0 4 2 6 5 ]").To(context); Ragged<int32_t> output = Ragged<int32_t>(" [ [ 2 1 ] [ 5 3 ] [ 7 6 ] [ ] ]").To(context); // NOLINT Ragged<int32_t> indexed = Index(input, 1, indexes); EXPECT_EQ(Equal(output, indexed), true); } } } TEST(GetTransposeReordering, NoDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 e4 // row2 f5 // row3 g6 h7 i8 // row4 j9 // row5 k10 l11 std::vector<int32_t> col_indexes{4, 5, 0, 1, 5, 3, 0, 2, 4, 5, 1, 4}; std::vector<int32_t> _row_splits{0, 2, 5, 6, 9, 10, 12}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 6, 3, 10, 7, 5, 0, 8, 11, 1, 4, 9}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, ThreeAxesEmptyCase) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> ragged("[ [ [ ] ] ]"); ragged = ragged.To(context); Array1<int32_t> order = GetTransposeReordering(ragged, 0); } } TEST(GetTransposeReordering, NoDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 // row2 e4 // row3 f5 g6 h7 // row4 i8 // row5 j9 k10 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, 
std::vector<int32_t>{1, 3, 0, 2, 1, 0, 1, 3, 5, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 2, 4, 5, 8, 9, 11}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 5, 0, 4, 6, 3, 1, 7, 9, 8, 10}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 std::vector<int32_t> col_indexes{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 3, 5}; std::vector<int32_t> _row_splits{0, 5, 8, 9, 13, 16, 18}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 16, 13, 14, 15, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 5, 8, 9, 13, 16, 18}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 13, 14, 15, 16, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, RandomFsaVecTest) { for (int32_t iter = 0; iter != 8; ++iter) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { int n = RandInt(100, 200); int32_t min_num_fsas = n; int32_t max_num_fsas = n * 2; bool acyclic = false; int32_t max_symbol = 100; int32_t min_num_arcs = min_num_fsas * 10; int32_t max_num_arcs = max_num_fsas * 20; FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol, min_num_arcs, max_num_arcs); fsas = fsas.To(context); Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); Array1<int32_t> order = GetTransposeReordering(dest_states_tensor, num_states); Sort(&order); ASSERT_EQ(order.Dim(), num_arcs); Array1<int32_t> expected = Range<int32_t>(context, num_arcs, 0); CheckArrayData(order, expected); } } } TEST(ChangeSublistSize, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9}); size_delta = -2; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), 
std::vector<int32_t>{0, 0, 1}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5}); } } TEST(ChangeSublistSizePinned, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9, 9}); size_delta = -3; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 0, 0}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5, 5}); } } } TEST(ChangeSublistSize, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 21}); // it is an error to use -2 here // because the state (state_idx01 == 2) has only 1 entry size_delta = -1; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 2, 3, 3, 4, 6}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 11}); } } TEST(ChangeSublistSizePinned, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 16, 21}); size_delta = -2; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 1, 1, 1, 1, 1, 2}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); } } TEST(RaggedShapeOpsTest, TestGetCountsPartitioned) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // Testing with simple case is good enough as we have tested GetCounts() // with random large size and GetCountsPartitioned just calls GetCounts. 
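// For reference: GetCountsPartitioned is GetCounts (a histogram of the
// values in src) with the output laid out in the given ans_shape. With the
// data below, values {0, 1, 0, 2, 5, 5, 7, 7, 9, 7} give counts
// {2, 1, 1, 0, 0, 2, 0, 3, 0, 1} for bins 0..9, which are then partitioned
// by the ans row_splits {0, 2, 4, 7, 10}.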
std::vector<int32_t> src_row_splits_vec = {0, 3, 4, 6, 10}; Array1<int32_t> src_row_splits(context, src_row_splits_vec); RaggedShape src_shape = RaggedShape2(&src_row_splits, nullptr, -1); std::vector<int32_t> src_values_vec = {0, 1, 0, 2, 5, 5, 7, 7, 9, 7}; Array1<int32_t> src_values(context, src_values_vec); Ragged<int32_t> src(src_shape, src_values); std::vector<int32_t> ans_row_splits_vec = {0, 2, 4, 7, 10}; Array1<int32_t> ans_row_splits(context, ans_row_splits_vec); RaggedShape ans_shape = RaggedShape2(&ans_row_splits, nullptr, -1); Ragged<int32_t> result = GetCountsPartitioned(src, ans_shape); ASSERT_EQ(result.NumAxes(), 2); // Check row_splits Array1<int32_t> row_splits = result.shape.RowSplits(1).To(cpu); std::vector<int32_t> result_row_splits( row_splits.Data(), row_splits.Data() + row_splits.Dim()); EXPECT_EQ(result_row_splits, ans_row_splits_vec); // check values std::vector<int32_t> expected_data = {2, 1, 1, 0, 0, 2, 0, 3, 0, 1}; Array1<int32_t> values = result.values.To(cpu); std::vector<int32_t> data(values.Data(), values.Data() + values.Dim()); EXPECT_EQ(data, expected_data); } } TEST(RaggedShapeOpsTest, TestStack) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[1] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[1] = &shapes[1]; } std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6}, {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); RaggedShape transpose = Transpose(result); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(transpose.RowSplits(i + 1), expected_row_splits[i]); } } } { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, 
-1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); shape_vec[j] = new_shape; shapes[j] = &shape_vec[j]; } std::vector<RaggedShape> cpu_shapes(num_shape); for (auto i = 0; i != num_shape; ++i) { cpu_shapes[i] = shape_vec[i].To(cpu); } { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } } } } } template <typename T> void TestStackRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<Ragged<T>> ragged_vec(num_shape); std::vector<Ragged<T> *> ragged(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); int32_t num_elems = new_shape.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); ragged_vec[j] = Ragged<T>(new_shape, src_values); ragged[j] = &ragged_vec[j]; } std::vector<Ragged<T>> cpu_ragged_vec(num_shape); for (auto j = 0; j != num_shape; ++j) { cpu_ragged_vec[j] = ragged_vec[j].To(cpu); } { // axis == 0 int32_t axis = 0; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] EXPECT_EQ(value, cpu_ragged_vec[i][index]); } } { // axis == 1 int32_t axis = 1; Ragged<T> result = Stack(axis, 
num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t j = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] EXPECT_EQ(value, cpu_ragged_vec[j][index]); } } } } } TEST(RaggedTest, TestStackRagged) { TestStackRagged<int32_t>(); TestStackRagged<double>(); } TEST(RaggedTest, TestMaxSize) { for (int32_t i = 0; i <= 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(c); int32_t axis = RandInt(1, num_axes - 1); int32_t max_size = shape.MaxSize(axis); if (axis == 0) { K2_CHECK(max_size == shape.Dim0()); } else { Array1<int32_t> row_splits = shape.RowSplits(axis).To(GetCpuContext()); int32_t *row_splits_data = row_splits.Data(); int32_t m = 0; for (int32_t i = 0; i + 1 < row_splits.Dim(); i++) { int32_t size = row_splits_data[i + 1] - row_splits_data[i]; if (size > m) m = size; } ASSERT_EQ(m, max_size); } } } TEST(RaggedShapeOpsTest, TestMakeTransposable) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2, 3, 3}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, // 6, 7}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6, 9, 12}, {0, 2, 3, 3, 4, 6, 7, 10, 10, 10, 12, 13, 13}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, {0, 0, 1, 3, 4, 4, 5, 6, 6, 6, 9, 9, 10}}; RaggedShape result = MakeTransposable(shape); for (int32_t i = 1; i != 3; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); CheckArrayData(result.RowIds(i), expected_row_ids[i - 1]); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t max_size = shape.MaxSize(1); RaggedShape result = MakeTransposable(shape); shape = shape.To(cpu); result = result.To(cpu); EXPECT_EQ(result.Dim0(), dim0); EXPECT_EQ(result.TotSize(1), dim0 * max_size); // check if every sub list in axis 1 has the same size int32_t *row_splits1 = result.RowSplits(1).Data(); for (int32_t j = 0; j != dim0 + 1; ++j) { EXPECT_EQ(row_splits1[j], j * max_size); } if (num_axes > 2) { for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { const std::vector<int32_t> &index = iter.Value(); EXPECT_EQ(shape[index], result[index]); } } } } } } TEST(RaggedShapeOpsTest, PrefixTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; Array1<int32_t> row_splits1_array(context, 
row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 4); EXPECT_EQ(num_axes, 3); { // n == 0 int32_t n = 0; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n > 0 && n < dim0 int32_t n = 2; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n == dim0 int32_t n = 4; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowSplits(2), row_splits2); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t n = RandInt(0, dim0); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. 
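// Prefix(shape, n) keeps the first n top-level sublists, so its row_splits1
// must equal the first n + 1 entries of the source's row_splits1, which is
// what the check below verifies.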
CheckArrayData(result.RowSplits(1), shape.RowSplits(1).Range(0, n + 1)); } } } } TEST(RaggedShapeOpsTest, GetPrefixesTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t ans_num = RandInt(0, 10); std::vector<int32_t> sizes; for (int32_t j = 0; j != ans_num; ++j) sizes.push_back(RandInt(0, dim0)); ASSERT_EQ(sizes.size(), ans_num); std::vector<RaggedShape> ans = GetPrefixes(shape, sizes); ASSERT_EQ(ans.size(), ans_num); for (int32_t j = 0; j != ans_num; ++j) { int32_t n = sizes[j]; RaggedShape ans_j = ans[j]; EXPECT_TRUE(IsCompatible(shape, ans_j)); EXPECT_EQ(ans_j.Dim0(), n); EXPECT_EQ(ans_j.NumAxes(), num_axes); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t m = 1; m != num_axes; ++m) { EXPECT_TRUE(Equal(result.RowSplits(m), ans_j.RowSplits(m))); } } } } } } TEST(RaggedShapeOpsTest, ArangeTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 3, 4, 6, 7, 10}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 2, 3, 3, 3, // 4, 5, 5, 5, 6, 7, 7, 9}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<int32_t> values(shape.NumElements()); std::iota(values.begin(), values.end(), 10); Array1<int32_t> values_array(context, values); Ragged<int32_t> ragged(shape, values_array); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 6); EXPECT_EQ(num_axes, 3); { // axis == 0, begin == end int32_t axis = 0; int32_t begin = 1, end = 1; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), 0); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {1, 1}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); EXPECT_EQ(ragged_result.values.Dim(), 0); } { // axis == 0, begin < end == Dim0() + 1 int32_t axis = 0; int32_t begin = 3, end = 6; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 3, 6}, {0, 1, 4, 5, 7, 7, 8}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {8, 16}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array 
Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {18, 19, 20, 21, 22, 23, 24, 25}; CheckArrayData(ragged_result.values, expected_values); } { // axis == 1 int32_t axis = 1; int32_t begin = 6, end = 8; std::vector<int32_t> expected_row_splits = {0, 1, 3}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), 2); CheckArrayData(result.RowSplits(1), expected_row_splits); std::pair<int32_t, int32_t> expected_value_range = {12, 15}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); std::vector<int32_t> expected_values = {22, 23, 24}; CheckArrayData(ragged_result.values, expected_values); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t num_axes = shape.NumAxes(); int32_t axis = RandInt(0, num_axes - 2); int32_t tot_size = shape.TotSize(axis); int32_t begin = RandInt(0, tot_size); int32_t end = RandInt(begin, tot_size); std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), ::max(0, end - begin)); EXPECT_EQ(result.NumAxes(), num_axes - axis); // just checking row_splits1 here is fine, as we have tested it with the // simple cases above. We just confirm it can run successfully with all // kinds of different random shapes. if (begin == end) { CheckArrayData(result.RowSplits(1), std::vector<int32_t>{0}); } else { Array1<int32_t> row_splits1 = shape.RowSplits(axis + 1).Arange(begin, end + 1); row_splits1 = Minus(row_splits1, row_splits1[0]); CheckArrayData(result.RowSplits(1), row_splits1); } EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); } } } } TEST(RaggedShapeOpsTest, AppendMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape appended_axis2_ref = RaggedShape("[ [ [[ x x ][ x ][]] [[x ][x][ x ]] ] [[[x ][ x x][]]]]") .To(c); RaggedShape appended_axis3_ref = RaggedShape("[ [ [[ x x x ]] [[x x x ]] ] [[[x x x]]]]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape appended_axis2 = Append(2, 3, srcs, &merge_map2); RaggedShape appended_axis3 = Append(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "appended_axis2 = " << appended_axis2; K2_LOG(INFO) << "appended_axis3 = " << appended_axis3; K2_CHECK(Equal(appended_axis2, appended_axis2_ref)); K2_CHECK(Equal(appended_axis3, appended_axis3_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, StackMoreAxes) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c), shape2 = RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c), shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c); RaggedShape stacked_ref = RaggedShape( "[ [ [[[ x x ]][[ x ]][[]]] [[[x ]][[x]][[ x 
]]] ] " "[[[[x ]][[ x x]][[]]]]]") .To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map2; Array1<uint32_t> merge_map3; RaggedShape stacked_axis2 = Stack(2, 3, srcs, &merge_map2); RaggedShape stacked_axis3 = Stack(3, 3, srcs, &merge_map3); K2_LOG(INFO) << "stacked_axis2 = " << stacked_axis2; K2_LOG(INFO) << "stacked_axis3 = " << stacked_axis3; K2_CHECK(Equal(stacked_axis2, stacked_ref)); K2_CHECK(Equal(stacked_axis2, stacked_ref)); std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10}; CheckArrayData(merge_map2, merge_values); CheckArrayData(merge_map3, merge_values); } } TEST(RaggedShapeOpsTest, Merge) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]") .To(c), // m: 0 3 6, m_out: 0 3, 6, shape2 = RaggedShape("[ [ x] [ x x x ] ]") .To(c), // m: 1 4, m_out: 1, 4 7 10 shape3 = RaggedShape("[ [ ] [ x x ] [] ]").To(c); // m: 2 5 8, m_out: ,2 5, RaggedShape ans_ref = RaggedShape("[ [] [x] [x x x] [] [] [x x] [x x] [x] ]").To(c); // This is a mixed-up kind of merge map that doesn't appear naturally (they // are always in-order from each source, right now) but it should still // work. std::vector<uint32_t> merge_map_data = {6, 1, 4, 8, 2, 5, 0, 3}; Array1<uint32_t> merge_map_in(c, merge_map_data); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; Array1<uint32_t> merge_map_out; RaggedShape merged = Merge(3, srcs, merge_map_in, &merge_map_out); ASSERT_EQ(true, Equal(ans_ref, merged)); std::vector<uint32_t> merge_map_out_data = {1, 4, 7, 10, 2, 5, 0, 3, 6}; CheckArrayData(merge_map_out, merge_map_out_data); } } TEST(RaggedTest, AddSuffixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> suffix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddSuffixToRagged(src, suffix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + suffix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); EXPECT_EQ(dst_cpu[src_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> suffix_cpu = suffix.To(GetCpuContext()); for (int32_t i = 0; i < suffix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i + 1] + i], suffix_cpu[i]); } } } } } TEST(RaggedTest, AddPrefixToRaggedTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 10; ++i) { Ragged<int32_t> src = RandomRagged<int32_t>().To(context); int32_t num_axes = src.NumAxes(); Array1<int32_t> prefix = RandUniformArray1<int32_t>( context, src.TotSize(num_axes - 2), 0, 100); Ragged<int32_t> dst = AddPrefixToRagged(src, prefix); EXPECT_EQ(dst.NumAxes(), num_axes); EXPECT_EQ(dst.NumElements(), src.NumElements() + prefix.Dim()); Ragged<int32_t> src_cpu = src.To(GetCpuContext()); Ragged<int32_t> dst_cpu = dst.To(GetCpuContext()); for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator(); !src_iter.Done(); src_iter.Next()) { const std::vector<int32_t> &src_indexes = src_iter.Value(); std::vector<int32_t> dst_indexes(src_indexes); 
dst_indexes.back() += 1; // increase the last index by 1 EXPECT_EQ(dst_cpu[dst_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> prefix_cpu = prefix.To(GetCpuContext()); for (int32_t i = 0; i < prefix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i] + i], prefix_cpu[i]); } } } } } TEST(RaggedTest, RemoveValuesLeq) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 3 4 ] [ 5 7 8 ] ]").To(c), s3 = Ragged<int32_t>(" [ [4] [5 7 8]]").To(c), s5 = Ragged<int32_t>(" [ [] [ 7 8]]").To(c); Ragged<int32_t> ans1 = RemoveValuesLeq(r, 3), ans2 = RemoveValuesLeq(r, 5); K2_LOG(INFO) << "ans2 = " << ans2; EXPECT_EQ(true, Equal(ans1, s3)); EXPECT_EQ(true, Equal(ans2, s5)); } } TEST(RaggedTest, IndexArrayRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Array1<float> f(c, std::vector<float>({0.0, 1.0, 2.0, 3.0, 4.0})); Ragged<float> fr = Ragged<float>(" [ [ 2.0 0.0 ] [ 1.0 2.0 3.0 ] ]").To(c), ans = Index(f, r); EXPECT_EQ(true, Equal(ans, fr)); } } TEST(RaggedTest, IndexRaggedRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Ragged<int32_t> s = Ragged<int32_t>(" [ [ 10 10 ] [ 11 ] [ 12 12 ] [ 13 ] [ 14 14] ]") .To(c); // NOLINT Ragged<int32_t> sr1 = Ragged<int32_t>(" [ [ [12 12] [10 10] ] [ [11] [12 12] [13] ] ]") .To(c); // NOLINT Ragged<int32_t> sr2 = Ragged<int32_t>(" [ [ 12 12 10 10 ] [ 11 12 12 13 ] ]") .To(c); // NOLINT EXPECT_EQ(true, Equal(Index(s, r, false), sr1)); EXPECT_EQ(true, Equal(Index(s, r, true), sr2)); } } TEST(RaggedShapeOpsTest, CoveringShape) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case RaggedShape shape1 = RaggedShape("[ [ x x ] [] [ x ] ]").To(c), shape2 = RaggedShape("[ [ x] [] [ x x x ] ]").To(c), shape3 = RaggedShape("[ [] [] [ x x ] ]").To(c); RaggedShape expected = RaggedShape("[ [x x] [] [x x x] ]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; RaggedShape ans = CoveringShape(3, srcs); EXPECT_TRUE(Equal(expected, ans)); // test CoveringShapeForwardMap { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2, -1, -1}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape2, ans); std::vector<int32_t> expected_map = {0, -1, 1, 2, 3}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape3, ans); std::vector<int32_t> expected_map = {-1, -1, 0, 1, -1}; CheckArrayData(elem_map, expected_map); } } { // another simple case: only one src RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]").To(c); RaggedShape *srcs[] = {&shape1}; RaggedShape ans = CoveringShape(1, srcs); EXPECT_TRUE(Equal(shape1, ans)); // test CoveringShapeForwardMap Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2}; CheckArrayData(elem_map, expected_map); } { // random case for (int32_t i = 0; i != 1; ++i) { int32_t num_shape = RandInt(1, 100); int32_t dim0 = RandInt(1, 1000); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> row_sizes = RandUniformArray1<int32_t>(c, dim0 + 1, 0, 100); ExclusiveSum(row_sizes, &row_sizes); shape_vec[j] = RaggedShape2(&row_sizes, nullptr, -1); 
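// These random shapes all share Dim0() == dim0. CoveringShape below returns
// a 2-axis shape whose row i is as long as the longest row i among the
// sources; the per-row max comparison further down checks exactly that.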
ASSERT_TRUE(shape_vec[j].Context()->IsCompatible(*c)); ASSERT_EQ(shape_vec[j].Dim0(), dim0); shapes[j] = &shape_vec[j]; } RaggedShape ans = CoveringShape(num_shape, shapes.data()); std::vector<Array1<int32_t>> elem_map(num_shape); for (int32_t j = 0; j != num_shape; ++j) { elem_map[j] = CoveringShapeForwardMap(shape_vec[j], ans); } // check ans ASSERT_EQ(ans.NumAxes(), 2); ASSERT_EQ(ans.Dim0(), dim0); ASSERT_TRUE(ans.Context()->IsCompatible(*c)); ContextPtr cpu = GetCpuContext(); ans = ans.To(cpu); for (int32_t j = 0; j != num_shape; ++j) shape_vec[j] = shape_vec[j].To(cpu); for (int32_t d = 0; d != dim0; ++d) { int32_t max_row_size = 0; for (int32_t j = 0; j != num_shape; ++j) max_row_size = ::max( shape_vec[j].RowSplits(1)[d + 1] - shape_vec[j].RowSplits(1)[d], max_row_size); EXPECT_EQ(max_row_size, ans.RowSplits(1)[d + 1] - ans.RowSplits(1)[d]); } // test CoveringShapeForwardMap for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> cur_elem_map = elem_map[j].To(cpu); ASSERT_EQ(cur_elem_map.Dim(), ans.NumElements()); int32_t n = 0; for (RaggedShapeIndexIterator ans_iter = ans.Iterator(); !ans_iter.Done(); ans_iter.Next()) { const std::vector<int32_t> &ans_indexes = ans_iter.Value(); int32_t src_shape_linear_index = cur_elem_map[n]; if (src_shape_linear_index != -1) { EXPECT_EQ(src_shape_linear_index, shape_vec[j][ans_indexes]); } ++n; } } } } } } TEST(RaggedShapeOpsTest, RaggedShapeAxis0Splitter) { for (int32_t i = 0; i < 20; i++) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape random = RandomRaggedShape(false, 3, 6, 0, 2000); int32_t dim0 = random.Dim0(); RaggedShapeAxis0Splitter splitter(random); for (int32_t i = 0; i < dim0; i++) { int32_t offset, offset2, offset3; RaggedShape sub_shape1 = random.Index(0, i, &offset), sub_shape2 = splitter.GetElement(i, &offset2); offset3 = splitter.GetOffset(i, random.NumAxes() - 1); EXPECT_EQ(offset, offset2); EXPECT_EQ(offset, offset3); EXPECT_EQ(Equal(sub_shape1, sub_shape2), true); } } } } template <typename T> static void TestSegmentedExclusiveSum() { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case Ragged<T> src("[ [1 2 3 -1] [3 4 -1] [] [5 6 7 -1] ]"); src = src.To(c); Array1<T> dst(c, src.NumElements()); SegmentedExclusiveSum(src, &dst); std::vector<T> expected = {0, 1, 3, 6, // 0, 3, 7, // 0, 5, 11, 18}; CheckArrayData(dst, expected); // &src.values == dst SegmentedExclusiveSum(src, &src.values); CheckArrayData(src.values, expected); } { // random case, we assume the implementation for cpu is correct and only // test for Cuda version if (c->GetDeviceType() == kCuda) { for (int32_t i = 0; i != 2; ++i) { Ragged<T> cpu_ragged = RandomRagged<T>(-1000, 1000, 2, 4, 0, 5000); int32_t dim = cpu_ragged.NumElements(); Array1<T> cpu_dst(GetCpuContext(), dim); SegmentedExclusiveSum(cpu_ragged, &cpu_dst); Ragged<T> ragged = cpu_ragged.To(c); Array1<T> dst(c, dim); SegmentedExclusiveSum(ragged, &dst); CheckArrayData(dst, cpu_dst, 0.1); } } } } } TEST(RaggedOpsTest, SegmentedExclusiveSum) { TestSegmentedExclusiveSum<int32_t>(); TestSegmentedExclusiveSum<float>(); TestSegmentedExclusiveSum<double>(); } TEST(RaggedOpsTest, TestComputeHash) { for (int32_t i = 0; i < 20; i++) { Ragged<int32_t> src = RandomRagged<int32_t>( std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::max(), 2, 4, 0, 20000), src_gpu = src.To(GetCudaContext()); { Array1<int64_t> hash1 = ComputeHash<int64_t>(src), hash2 = ComputeHash<int64_t>(src_gpu).To(GetCpuContext()); EXPECT_EQ(Equal(hash1, hash2), true); } { 
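// Repeat the CPU-vs-GPU determinism check with 32-bit hashes.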
Array1<int32_t> hash1 = ComputeHash<int32_t>(src), hash2 = ComputeHash<int32_t>(src_gpu).To(GetCpuContext()); EXPECT_EQ(Equal(hash1, hash2), true); } } } TEST(RaggedOpsTest, TestUniqueSequences) { for (int32_t i = 0; i < 20; i++) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> src = RandomRagged<int32_t>(0, 3, 2, 4, 0, 20000).To(c); Ragged<int32_t> unique = UniqueSequences(src); if (src.NumAxes() == 2) { src = Unsqueeze(src, 0); unique = Unsqueeze(unique, 0); } ContextPtr cpu = GetCpuContext(); Array1<int32_t> hash_src = ComputeHash<int32_t>(src).To(cpu), hash_unique = ComputeHash<int32_t>(unique).To(cpu); RaggedShape src_hash_shape = RemoveAxis(src.shape, src.NumAxes() - 1).To(cpu); src_hash_shape = GetLayer(src_hash_shape, src_hash_shape.NumLayers() - 1); RaggedShape unique_hash_shape = RemoveAxis(unique.shape, unique.NumAxes() - 1).To(cpu); unique_hash_shape = GetLayer(unique_hash_shape, unique_hash_shape.NumLayers() - 1); K2_CHECK_EQ(src_hash_shape.Dim0(), unique_hash_shape.Dim0()); const int32_t *src_hash_row_splits = src_hash_shape.RowSplits(1).Data(), *unique_hash_row_splits = unique_hash_shape.RowSplits(1).Data(); const int32_t *src_hash_data = hash_src.Data(), *unique_hash_data = hash_unique.Data(); for (int32_t r = 0; r < src_hash_shape.Dim0(); r++) { int32_t src_begin = src_hash_row_splits[r], src_end = src_hash_row_splits[r + 1], unique_begin = unique_hash_row_splits[r], unique_end = unique_hash_row_splits[r + 1]; std::set<int32_t> src_set(src_hash_data + src_begin, src_hash_data + src_end), unique_set(unique_hash_data + unique_begin, unique_hash_data + unique_end); EXPECT_EQ((src_set == unique_set), true); } } } } TEST(RaggedIntTest, TestCreateRagged2Int) { std::vector<std::vector<int32_t>> vecs{{7, 9}, {10, 12, 13}, {}}; std::vector<int32_t> expected_values{7, 9, 10, 12, 13}; std::vector<int32_t> expected_row_splits = {0, 2, 5, 5}; Ragged<int32_t> r = CreateRagged2(vecs); EXPECT_EQ(r.Context()->GetDeviceType(), kCpu); CheckArrayData(r.RowSplits(1), expected_row_splits); EXPECT_EQ(r.NumAxes(), 2); CheckArrayData(r.values, expected_values); Ragged<int32_t> r2("[ [7 9] [10 12 13] [] ]"); K2_CHECK(Equal(r, r2)); } TEST(RaggedFloatTest, TestCreateRagged2Float) { std::vector<std::vector<float>> vecs{{1.2, 2.3}, {}, {3.4, 5.6}}; std::vector<float> expected_values{1.2, 2.3, 3.4, 5.6}; std::vector<int32_t> expected_row_splits = {0, 2, 2, 4}; Ragged<float> r = CreateRagged2(vecs); EXPECT_EQ(r.Context()->GetDeviceType(), kCpu); CheckArrayData(r.RowSplits(1), expected_row_splits); EXPECT_EQ(r.NumAxes(), 2); CheckArrayData(r.values, expected_values); } } // namespace k2
230f68b997c8594111ead65f5f3f675d56bb5f97.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Yiming Wang * * See LICENSE for clarification regarding multiple authors */ #include <gmock/gmock.h> #include <gtest/gtest.h> #include <algorithm> #include <limits> #include <numeric> #include <set> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/array_ops.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" #include "k2/csrc/ragged_ops.h" #include "k2/csrc/tensor.h" #include "k2/csrc/test_utils.h" namespace k2 { class RaggedShapeOpsSuiteTest : public ::testing::Test { protected: RaggedShapeOpsSuiteTest() { ContextPtr context = GetCpuContext(); const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; std::vector<RaggedShapeLayer> axes; axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back(RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); simple_shape_ = RaggedShape(axes, true); // random_shape_ is on CPU random_shape_ = RandomRaggedShape(true, // set_row_ids 3, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements } RaggedShape simple_shape_; RaggedShape random_shape_; }; TEST(RaggedShapeTest, TestConstructFromString) { RaggedShape rs(" [ [ x x ] [x] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); RaggedShape rs2(" [ [ [ x x ] ] [[x]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; K2_CHECK_EQ(RaggedShape("[ ]").Dim0(), 0); ASSERT_DEATH(RaggedShape(" [ [ x x ] [x] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x x ] [[x]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ x [] x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x ] [ x ] "), ""); ASSERT_DEATH(RaggedShape(" [ x | x ] "), ""); for (int i = 0; i < 5; i++) { RaggedShape rs = RandomRaggedShape(true, 2, // min_num_axes 4, // max_num_axes 0, // min_num_elements 1000); // max_num_elements std::ostringstream os; os << rs; RaggedShape rs2; std::istringstream is(os.str()); K2_LOG(INFO) << "Shape is: " << os.str(); is >> rs2; K2_CHECK(is.good()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. 
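// (when the random shape has zero elements, its printed form cannot convey
// how many axes it had, so the round-trip is only required to agree up to
// emptiness)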
K2_CHECK(Equal(rs, rs2) || rs.NumElements() == 0); } } TEST(RaggedTest, TestRaggedFromString) { Ragged<int32_t> rs(" [ [ 1 2 ] [3] ]"); Array1<int32_t> row_splits1(GetCpuContext(), std::vector<int32_t>{0, 2, 3}); K2_LOG(INFO) << rs.RowSplits(1); K2_CHECK(Equal(rs.RowSplits(1), row_splits1)); K2_CHECK_EQ(rs.values.Back(), 3); K2_CHECK_EQ(rs.values[0], 1); Ragged<int32_t> rs2(" [ [ [ 0 5 ] ] [[10]] ]"); K2_LOG(INFO) << "rs2 = " << rs2; ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [0] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 0 ] [[0]]] "), ""); ASSERT_DEATH(RaggedShape(" [ [ 0 [] 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 ] [ 0 ] "), ""); ASSERT_DEATH(RaggedShape(" [ 0 | 0 ] "), ""); for (int32_t i = 0; i < 5; i++) { Ragged<int32_t> r = RandomRagged<int32_t>(); std::ostringstream os; os << r; Ragged<int32_t> r2(os.str()); // the reason for the || below is that in "[ ]", the number of // axes is ambiguous; we assume 2. K2_CHECK(Equal(r, r2) || r.values.Dim() == 0); } } template <typename T> void TestMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits = {0}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = 0; std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> max_values(context, num_rows); // just run to check if there's any error MaxPerSublist(ragged, 1, &max_values); EXPECT_EQ(max_values.Dim(), 0); } { const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 2, 8, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> max_values(context, num_rows); T default_value = 2; MaxPerSublist(ragged, default_value, &max_values); // copy memory from GPU/CPU to CPU std::vector<T> cpu_data(max_values.Dim()); max_values.Context()->CopyDataTo( max_values.Dim() * max_values.ElementSize(), max_values.Data(), cpu, cpu_data.data()); std::vector<T> expected_data = {3, default_value, 8, default_value}; EXPECT_EQ(cpu_data, expected_data); } { // test with random large size const int32_t min_num_elements = 2000; // not random shape is on CPU RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_elements, 5000); ASSERT_EQ(shape.NumAxes(), 2); RaggedShape gpu_shape; if (context->GetDeviceType() == kCuda) { // copy shape to GPU const Array1<T> &row_splits = shape.RowSplits(1); RaggedShapeLayer shape_dim; shape_dim.row_splits = row_splits.To(GetCudaContext()); shape_dim.cached_tot_size = shape.NumElements(); std::vector<RaggedShapeLayer> axes = {shape_dim}; gpu_shape = RaggedShape(axes, true); } int32_t num_elems = shape.NumElements(); std::vector<T> data(num_elems); for (int32_t i = 0; i != 10; ++i) { std::iota(data.begin(), data.end(), 0); // randomly set data[pos] = num_elems which is // greater than any element in data int32_t pos = RandInt(0, num_elems - 1); data[pos] = num_elems; // find the corresponding row int32_t num_rows = shape.Dim0(); const int32_t *row_splits_data 
= shape.RowSplits(1).Data(); int32_t row = 0; for (int32_t i = 0; i < num_rows; ++i) { if (pos >= row_splits_data[i] && pos < row_splits_data[i + 1]) { row = i; break; } } Array1<T> values(context, data); Ragged<T> ragged(context->GetDeviceType() == kCuda ? gpu_shape : shape, values); Array1<T> max_values(context, num_rows); T default_value = 0; MaxPerSublist(ragged, default_value, &max_values); EXPECT_EQ(max_values[row], num_elems); } } } } TEST(RaggedShapeOpsTest, MaxPerSubListTest) { TestMaxPerSubListTest<int32_t>(); } template <typename T> void TestArgMaxPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case const std::vector<int32_t> row_splits_vec = {0}; Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<int32_t> argmax_values(context, num_rows); // just run to check if there's any error ArgMaxPerSublist(ragged, 1, &argmax_values); EXPECT_EQ(argmax_values.Dim(), 0); } { const std::vector<int32_t> row_splits_vec = {0, 3, 3, 6, 7}; Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 2, 1, 0, -1}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> argmax_values(context, num_rows); T default_value = 2; ArgMaxPerSublist(ragged, default_value, &argmax_values); std::vector<T> expected_data = {2, -1, 3, -1}; CheckArrayData(argmax_values, expected_data); } { // test with random large size ContextPtr cpu = GetCpuContext(); for (int32_t i = 0; i != 10; ++i) { Ragged<int32_t> ragged = RandomRagged<int32_t>(0, 1000, 2, 4, 0, 5000).To(context); int32_t last_axis = ragged.NumAxes() - 1; Array1<int32_t> argmax_values(context, ragged.RowSplits(last_axis).Dim() - 1); int32_t default_value = 2; ArgMaxPerSublist(ragged, default_value, &argmax_values); ragged = ragged.To(cpu); argmax_values = argmax_values.To(cpu); Array1<int32_t> row_splits = ragged.RowSplits(last_axis); int32_t rows = row_splits.Dim() - 1; for (int32_t row = 0; row < rows; row++) { int32_t begin = row_splits[row], end = row_splits[row + 1]; int32_t max_val = 2, best_pos = -1; for (int32_t pos = begin; pos < end; pos++) { if (ragged.values[pos] >= max_val) { max_val = ragged.values[pos]; best_pos = pos; } } EXPECT_EQ(argmax_values[row], best_pos); } } } } } TEST(RaggedShapeOpsTest, ArgMaxPerSubListTest) { TestArgMaxPerSubListTest<int32_t>(); } template <typename T> void TestMinPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // empty case std::vector<int32_t> row_splits_vec = {0}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<T> values(context, 0); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); ASSERT_EQ(num_rows, 0); Array1<T> min_values(context, num_rows); // just run to check if there's any error MinPerSublist(ragged, 1, &min_values); EXPECT_EQ(min_values.Dim(), 0); } { std::vector<int32_t> row_splits_vec = {0, 2, 2, 5, 6}; Array1<T> row_splits(context, row_splits_vec); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); const std::vector<T> values_vec = {1, 3, 3, 8, 4, -1}; 
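// sublists per row_splits {0, 2, 2, 5, 6}: {1, 3}, {}, {3, 8, 4}, {-1};
// with default_value = 2 the expected minima below are {1, 2, 2, -1}, since
// the default seeds every reduction and is the answer for the empty sublist
// (and for {3, 8, 4}, whose true minimum 3 exceeds the default)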
Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> min_values(context, num_rows); T default_value = 2; MinPerSublist(ragged, default_value, &min_values); // copy memory from GPU/CPU to CPU min_values = min_values.To(cpu); std::vector<T> cpu_data(min_values.Data(), min_values.Data() + min_values.Dim()); std::vector<T> expected_data = {1, default_value, default_value, -1}; EXPECT_EQ(cpu_data, expected_data); } // May add tests for random large size? (but maybe it's fine to not add as // we have tested large cases in MaxPerSubList) } } TEST(RaggedShapeOpsTest, MinPerSubListTest) { TestMinPerSubListTest<int32_t>(); } template <typename T> void TestAndOrPerSubListTest() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // And const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 6, 11, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = -1; AndPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {1, -1, 2, 0}; EXPECT_EQ(cpu_data, expected_data); } { // Or const std::vector<int32_t> row_splits = {0, 2, 2, 5, 6}; RaggedShapeLayer shape_dim; shape_dim.row_splits = Array1<int32_t>(context, row_splits); shape_dim.cached_tot_size = row_splits.back(); std::vector<RaggedShapeLayer> axes = {shape_dim}; RaggedShape shape(axes, true); const std::vector<T> values_vec = {1, 3, 3, 4, 6, 0}; Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); int32_t num_rows = ragged.shape.Dim0(); Array1<T> dst(context, num_rows); T default_value = 0; OrPerSublist(ragged, default_value, &dst); // copy memory from GPU/CPU to CPU dst = dst.To(cpu); std::vector<T> cpu_data(dst.Data(), dst.Data() + dst.Dim()); std::vector<T> expected_data = {3, 0, 7, 0}; EXPECT_EQ(cpu_data, expected_data); } } } TEST(RaggedShapeOpsTest, AndOrPerSubListTest) { TestAndOrPerSubListTest<int32_t>(); } void TestUnsqueeze(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); src_shape.Populate(); // set row_ids { // axis = 0. 
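// Unsqueeze at axis 0 wraps the whole shape in one extra list level,
// e.g. [ [x x] [x] ] -> [ [ [x x] [x] ] ]: the new top layer has
// row_splits {0, Dim0()} and all-zero row_ids, and every original layer
// shifts down by one, which is exactly what the checks below verify.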
RaggedShape shape = Unsqueeze(src_shape, 0); int32_t dim0 = src_shape.Dim0(); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, dim0}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data(dim0, 0); CheckArrayData(row_ids0, data); } { for (size_t i = 0; i != src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = Unsqueeze(src_shape, axis); int32_t tot_size = shape.TotSize(axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); { for (int32_t i = 0; i < axis; ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i].row_ids); } } { const Array1<int32_t> &row_splits = dest_axes[axis].row_splits; std::vector<int32_t> data(tot_size + 1); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_splits, data); } { const Array1<int32_t> &row_ids = dest_axes[axis].row_ids; std::vector<int32_t> data(tot_size); std::iota(data.begin(), data.end(), 0); CheckArrayData(row_ids, data); } { for (std::size_t i = axis; i < src_axes.size(); ++i) { CheckArrayData(src_axes[i].row_splits, dest_axes[i + 1].row_splits); CheckArrayData(src_axes[i].row_ids, dest_axes[i + 1].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestUnsqueeze) { TestUnsqueeze(simple_shape_); TestUnsqueeze(random_shape_); } TEST(RaggedShapeOpsTest, TestUnsqueezeParallel) { for (int32_t i = 0; i < 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_shapes = RandInt(0, 10); std::vector<RaggedShape *> orig_shapes; for (int32_t i = 0; i < num_shapes; i++) orig_shapes.push_back( new RaggedShape(RandomRaggedShape(false, 2, 5, 0, 1000).To(c))); int32_t axis = 0; // only one supported for now. std::vector<RaggedShape> unsqueezed = UnsqueezeParallel(num_shapes, orig_shapes.data(), axis); for (int32_t i = 0; i < num_shapes; i++) { ASSERT_EQ(unsqueezed[i].Validate(), true); RaggedShape temp = RemoveAxis(unsqueezed[i], axis); ASSERT_EQ(Equal(temp, *(orig_shapes[i])), true); delete orig_shapes[i]; } } } void TestRemoveAxis(const RaggedShape &input_shape) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = input_shape.To(context); ASSERT_EQ(src_shape.NumAxes(), 4); { // axis = 0. 
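// removing axis 0 drops the top layer, so the result keeps layers 1..
// of the source unchanged, e.g. [ [ [x] [x x] ] [ [x] ] ] -> [ [x] [x x] [x] ]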
int32_t axis = 0; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 1 int32_t axis = 1; RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { const Array1<int32_t> &row_splits0 = dest_axes[0].row_splits; std::vector<int32_t> data = {0, 3, 7, 10}; CheckArrayData(row_splits0, data); } { const Array1<int32_t> &row_ids0 = dest_axes[0].row_ids; std::vector<int32_t> data = {0, 0, 0, 1, 1, 1, 1, 2, 2, 2}; CheckArrayData(row_ids0, data); } { for (std::size_t i = 1; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i + 1].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i + 1].row_ids); } } } { // axis = 3 int32_t axis = 3; // the last axis RaggedShape shape = RemoveAxis(src_shape, axis); const std::vector<RaggedShapeLayer> &src_axes = src_shape.Layers(); const std::vector<RaggedShapeLayer> &dest_axes = shape.Layers(); ASSERT_EQ(src_axes.size(), 3); ASSERT_EQ(dest_axes.size(), 2); { for (std::size_t i = 0; i != dest_axes.size(); ++i) { CheckArrayData(dest_axes[i].row_splits, src_axes[i].row_splits); CheckArrayData(dest_axes[i].row_ids, src_axes[i].row_ids); } } } } } TEST_F(RaggedShapeOpsSuiteTest, TestRemoveAxis) { TestRemoveAxis(simple_shape_); } TEST(RaggedShapeOpsTest, TestGetOffsets) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { for (int32_t i = 0; i != 2; ++i) { int32_t num_shape = RandInt(10, 100); int32_t num_axes = RandInt(2, 4); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { shape_vec[j] = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); shapes[j] = &shape_vec[j]; } RaggedShape **shapes_ptr = shapes.data(); Array2<int32_t> offsets = GetOffsets(num_shape, shapes_ptr); ASSERT_EQ(offsets.Dim0(), num_axes + 1); ASSERT_EQ(offsets.Dim1(), num_shape + 1); auto acc = offsets.Accessor(); for (int32_t axis = 0; axis <= num_axes; ++axis) { int32_t sum = 0; for (int32_t j = 0; j <= num_shape; ++j) { EXPECT_EQ(acc(axis, j), sum); if (j < num_shape) { sum += (axis == 0 ? 1 : shape_vec[j].TotSize(axis - 1)); } } } } } } // returns a random ragged shape where the dims on axis 1 are all the same // (so: can be transposed). 
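// implementation note: it finds d, the largest divisor of Dim0() not
// exceeding sqrt(Dim0()), and composes a top level of d rows with
// Dim0() / d entries each; e.g. Dim0() == 12 yields 3 rows of 4.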
RaggedShape RandomRaggedShapeToTranspose(ContextPtr c) { ContextPtr c_cpu = GetCpuContext(); RaggedShape random = RandomRaggedShape(false, 2, 4, 0, 5000).To(c); int32_t input_dim0 = random.Dim0(), divisor = 1; for (int32_t i = 1; i * i <= input_dim0; i++) { if (input_dim0 % i == 0 && i > divisor) divisor = i; } int32_t output_dim0 = divisor, output_dim1 = input_dim0 / divisor; Array1<int32_t> row_splits = Range<int32_t>(c, output_dim0 + 1, 0, output_dim1); int32_t cached_tot_size = input_dim0; RaggedShape top_level_shape = RaggedShape2(&row_splits, nullptr, cached_tot_size); return ComposeRaggedShapes(top_level_shape, random); } TEST(RaggedShapeOpsTest, TestTranspose) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); RaggedShape shape = Transpose(src_shape); EXPECT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); RaggedShape transposed = Transpose(to_transpose); if (context->GetDeviceType() != kCpu) { to_transpose = to_transpose.To(cpu); transposed = transposed.To(cpu); } for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t i = transposed[index]; // Just make sure this doesn't crash, // don't need the value. std::swap(index[0], index[1]); i = to_transpose[index]; // don't need the value, just need to make // sure it's an allowable index. ++i; // this line just suppresses the warning `variable i set but not // used` } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); std::swap(index[0], index[1]); int32_t i = transposed[index]; // don't need the value, just need to // make sure it's an allowable index. 
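// together the two loops check that (i, j, ...) is a valid index into the
// transposed shape iff (j, i, ...) is valid in the source, which is all
// Transpose() guarantees at the shape level; values are checked in
// TestTransposeRagged below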
} } } } } template <typename T> void TestTransposeRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { const std::vector<int32_t> row_splits1_vec = {0, 2, 4, 6}; const std::vector<int32_t> row_splits2_vec = {0, 3, 4, 7, 8, 10, 12}; Array1<int32_t> row_splits1(context, row_splits1_vec); Array1<int32_t> row_splits2(context, row_splits2_vec); RaggedShape src_shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); ASSERT_EQ(src_shape.Dim0(), 3); ASSERT_EQ(src_shape.TotSize(1), 6); std::vector<T> values = {0, 1, 2, 3, 4, 5, 8, 7, 6, 9, 10, 15}; ASSERT_EQ(values.size(), src_shape.NumElements()); Array1<T> values_array(context, values); Ragged<T> ragged(src_shape, values_array); Ragged<T> ans = Transpose(ragged); RaggedShape shape = ans.shape; // Check shape ASSERT_EQ(shape.Dim0(), 2); ASSERT_EQ(shape.TotSize(1), 6); const std::vector<int32_t> expected_row_splits = {0, 3, 6}; const std::vector<int32_t> expected_row_ids = {0, 0, 0, 1, 1, 1}; CheckArrayData(shape.RowSplits(1), expected_row_splits); CheckArrayData(shape.RowIds(1), expected_row_ids); CheckArrayData(shape.RowSplits(2), {0, 3, 6, 8, 9, 10, 12}); CheckArrayData(shape.RowIds(2), {0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5}); // Check values CheckArrayData(ans.values, {0, 1, 2, 4, 5, 8, 6, 9, 3, 7, 10, 15}); } { // random case for (int32_t j = 0; j != 2; ++j) { RaggedShape to_transpose = RandomRaggedShapeToTranspose(context); int32_t num_elems = to_transpose.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); Ragged<T> src(to_transpose, src_values); Ragged<T> ans = Transpose(src); if (context->GetDeviceType() == kCuda) { src = src.To(cpu); ans = ans.To(cpu); to_transpose = to_transpose.To(cpu); } RaggedShape transposed = ans.shape; for (auto iter = transposed.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = ans[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, src[index]); } for (auto iter = to_transpose.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = src[index]; std::swap(index[0], index[1]); EXPECT_EQ(value, ans[index]); } } } } } TEST(RaggedTest, TestTransposeRagged) { TestTransposeRagged<int32_t>(); TestTransposeRagged<double>(); } void TestRaggedShape2(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); src_shape.Populate(); ASSERT_GE(src_shape.NumAxes(), 2); Array1<int32_t> row_splits = src_shape.RowSplits(1); Array1<int32_t> row_ids = src_shape.RowIds(1); int32_t cached_tot_size = src_shape.TotSize(1); { // both row_splits and row_ids are non-null RaggedShape result = RaggedShape2(&row_splits, &row_ids, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // both row_splits and row_ids are non-null, cached_tot_size = -1 RaggedShape result = RaggedShape2(&row_splits, &row_ids, -1); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // row_ids is null RaggedShape result = RaggedShape2(&row_splits, nullptr, cached_tot_size); CheckArrayData(result.RowSplits(1), row_splits); CheckArrayData(result.RowIds(1), row_ids); EXPECT_EQ(result.TotSize(1), cached_tot_size); } { // 
row_ids is null, cached_tot_size = -1
      RaggedShape result = RaggedShape2(&row_splits, nullptr, -1);
      CheckArrayData(result.RowSplits(1), row_splits);
      CheckArrayData(result.RowIds(1), row_ids);
      EXPECT_EQ(result.TotSize(1), cached_tot_size);
    }
    // note if row_splits == null, then we assume there are no empty rows
    // after the last row-id in row_ids
    if (row_splits.Dim() == (row_ids.Dim() == 0 ? 1 : row_ids.Back() + 2)) {
      {
        // row_splits is null
        RaggedShape result = RaggedShape2(nullptr, &row_ids, cached_tot_size);
        CheckArrayData(result.RowSplits(1), row_splits);
        CheckArrayData(result.RowIds(1), row_ids);
        EXPECT_EQ(result.TotSize(1), cached_tot_size);
      }
      {
        // row_splits is null, cached_tot_size = -1
        RaggedShape result = RaggedShape2(nullptr, &row_ids, -1);
        CheckArrayData(result.RowSplits(1), row_splits);
        CheckArrayData(result.RowIds(1), row_ids);
        EXPECT_EQ(result.TotSize(1), cached_tot_size);
      }
    }
  }
}

TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape2) {
  TestRaggedShape2(simple_shape_);
  TestRaggedShape2(random_shape_);
}

void TestRaggedShape3(const RaggedShape &shape) {
  ContextPtr cpu = GetCpuContext();  // will be used to copy data
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    RaggedShape src_shape = shape.To(context);
    src_shape.Populate();
    ASSERT_GE(src_shape.NumAxes(), 3);
    Array1<int32_t> row_splits1 = src_shape.RowSplits(1);
    Array1<int32_t> row_ids1 = src_shape.RowIds(1);
    int32_t cached_tot_size1 = src_shape.TotSize(1);
    Array1<int32_t> row_splits2 = src_shape.RowSplits(2);
    Array1<int32_t> row_ids2 = src_shape.RowIds(2);
    int32_t cached_tot_size2 = src_shape.TotSize(2);
    {
      // both row_splits and row_ids are non-null
      RaggedShape result =
          RaggedShape3(&row_splits1, &row_ids1, cached_tot_size1,
                       &row_splits2, &row_ids2, cached_tot_size2);
      CheckArrayData(result.RowSplits(1), row_splits1);
      CheckArrayData(result.RowIds(1), row_ids1);
      EXPECT_EQ(result.TotSize(1), cached_tot_size1);
      CheckArrayData(result.RowSplits(2), row_splits2);
      CheckArrayData(result.RowIds(2), row_ids2);
      EXPECT_EQ(result.TotSize(2), cached_tot_size2);
    }
    {
      // row_ids is null, cached_tot_size = -1
      RaggedShape result =
          RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1);
      CheckArrayData(result.RowSplits(1), row_splits1);
      CheckArrayData(result.RowIds(1), row_ids1);
      EXPECT_EQ(result.TotSize(1), cached_tot_size1);
      CheckArrayData(result.RowSplits(2), row_splits2);
      CheckArrayData(result.RowIds(2), row_ids2);
      EXPECT_EQ(result.TotSize(2), cached_tot_size2);
    }
    // note if row_splits == null, then we assume there are no empty rows
    // after the last row-id in row_ids
    bool valid1 =
        (row_splits1.Dim() == (row_ids1.Dim() == 0 ? 1 : row_ids1.Back() + 2));
    bool valid2 = (row_splits2.Dim() == (row_ids2.Dim() == 0 ?
1 : row_ids2.Back() + 2)); if (valid1 && valid2) { RaggedShape result = RaggedShape3(nullptr, &row_ids1, -1, nullptr, &row_ids2, -1); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); EXPECT_EQ(result.TotSize(1), cached_tot_size1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); EXPECT_EQ(result.TotSize(2), cached_tot_size2); } // TODO(haowen): add more cases for other branches } } TEST_F(RaggedShapeOpsSuiteTest, TestRaggedShape3) { TestRaggedShape3(simple_shape_); TestRaggedShape3(random_shape_); } void TestComposeShape(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 3); Array1<int32_t> row_splits1 = src_shape.RowSplits(1); Array1<int32_t> row_ids1 = src_shape.RowIds(1); Array1<int32_t> row_splits2 = src_shape.RowSplits(2); Array1<int32_t> row_ids2 = src_shape.RowIds(2); RaggedShape shape1 = RaggedShape2(&row_splits1, nullptr, -1); RaggedShape shape2 = RaggedShape2(&row_splits2, nullptr, -1); RaggedShape result = ComposeRaggedShapes(shape1, shape2); ASSERT_EQ(result.NumAxes(), 3); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowIds(1), row_ids1); CheckArrayData(result.RowSplits(2), row_splits2); CheckArrayData(result.RowIds(2), row_ids2); } } TEST_F(RaggedShapeOpsSuiteTest, TestComposeShape) { TestComposeShape(simple_shape_); TestComposeShape(random_shape_); } void TestShapeFromTotSize(const RaggedShape &shape) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape src_shape = shape.To(context); ASSERT_GE(src_shape.NumAxes(), 2); int32_t num_axes = src_shape.NumAxes(); std::vector<int32_t> tot_sizes(num_axes); for (int32_t i = 0; i != num_axes; ++i) { tot_sizes[i] = src_shape.TotSize(i); } RaggedShape result = RaggedShapeFromTotSizes(context, num_axes, tot_sizes.data()); ASSERT_EQ(result.NumAxes(), num_axes); for (int32_t i = 0; i < num_axes; ++i) { EXPECT_EQ(result.TotSize(i), src_shape.TotSize(i)); if (i > 0) { EXPECT_EQ(result.RowSplits(i).Dim(), src_shape.RowSplits(i).Dim()); EXPECT_EQ(result.RowIds(i).Dim(), src_shape.RowIds(i).Dim()); } } } } TEST_F(RaggedShapeOpsSuiteTest, TestShapeFromTotSize) { TestShapeFromTotSize(simple_shape_); TestShapeFromTotSize(random_shape_); } template <typename T> void TestRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // constructed with row_splits and row_ids // RaggedTensor4 t = [ // [ [[ 1, 2], [4]], [[3, 0]] ], // [ [[7, 8, 9]], [[6], [3, 5, 7]], [[2]] ], // [ [[3, 4], [], [8]] ] // ] const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits3 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; const std::vector<int32_t> row_ids3 = {0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 9}; const std::vector<T> values_vec = {1, 2, 4, 3, 0, 7, 8, 9, 6, 3, 5, 7, 2, 3, 4, 8}; std::vector<RaggedShapeLayer> axes; axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits1), Array1<int32_t>(context, row_ids1), static_cast<int32_t>(row_ids1.size())}); axes.emplace_back( 
RaggedShapeLayer{Array1<int32_t>(context, row_splits2), Array1<int32_t>(context, row_ids2), static_cast<int32_t>(row_ids2.size())}); axes.emplace_back( RaggedShapeLayer{Array1<int32_t>(context, row_splits3), Array1<int32_t>(context, row_ids3), static_cast<int32_t>(row_ids3.size())}); RaggedShape shape(axes, true); Array1<T> values(context, values_vec); Ragged<T> ragged(shape, values); // test Index(axis, i) { // values: [[[ 1, 2], [4]], [[3, 0]]] Ragged<T> sub_raggged = ragged.Index(0, 0); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 2, 3}, {0, 2, 3, 5}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {1, 2, 4, 3, 0}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[7, 8, 9]], [[6], [3, 5, 7]], [[2]]] Ragged<T> sub_raggged = ragged.Index(0, 1); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 1, 3, 4}, {0, 3, 4, 7, 8}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {7, 8, 9, 6, 3, 5, 7, 2}; CheckArrayData<T>(sub_values, sub_values_vec); } { // values: [[[3, 4], [], [8]]] Ragged<T> sub_raggged = ragged.Index(0, 2); RaggedShape &sub_shape = sub_raggged.shape; EXPECT_EQ(sub_shape.NumAxes(), 3); const std::vector<std::vector<int32_t>> sub_row_splits_vec = { {0, 3}, {0, 2, 2, 3}}; CheckRowSplits(sub_shape, sub_row_splits_vec); const Array1<T> &sub_values = sub_raggged.values; const std::vector<T> sub_values_vec = {3, 4, 8}; CheckArrayData<T>(sub_values, sub_values_vec); } // test operator[](const std::vector<int32_t> &indexes) if (context->GetDeviceType() == kCpu) { { std::vector<int32_t> indexes = {0, 0, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 0); EXPECT_EQ(ragged[indexes], 1); } { std::vector<int32_t> indexes = {0, 1, 0, 0}; EXPECT_EQ(ragged.shape[indexes], 3); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {1, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 6); EXPECT_EQ(ragged[indexes], 8); } { std::vector<int32_t> indexes = {1, 1, 1, 0}; EXPECT_EQ(ragged.shape[indexes], 9); EXPECT_EQ(ragged[indexes], 3); } { std::vector<int32_t> indexes = {2, 0, 0, 1}; EXPECT_EQ(ragged.shape[indexes], 14); EXPECT_EQ(ragged[indexes], 4); } { std::vector<int32_t> indexes = {2, 0, 2, 0}; EXPECT_EQ(ragged.shape[indexes], 15); EXPECT_EQ(ragged[indexes], 8); } } const std::vector<std::vector<int32_t>> row_splits_vec = { row_splits1, row_splits2, row_splits3}; // test To(ctx) { // to GPU Ragged<T> other = ragged.To(GetCudaContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } { // to CPU Ragged<T> other = ragged.To(GetCpuContext()); CheckRowSplits(other.shape, row_splits_vec); CheckArrayData<T>(other.values, values_vec); } } } } template <typename T, typename OP = LessThan<T>> static void CpuSortSublists(const Array1<int32_t> &row_splits, Array1<T> *src) { K2_CHECK(src->Context()->GetDeviceType() == kCpu); T *p = src->Data(); OP comp = OP(); for (int32_t i = 0; i < row_splits.Dim() - 1; ++i) { int32_t cur = row_splits[i]; int32_t next = row_splits[i + 1]; std::sort(p + cur, p + next, comp); } } template <typename T, typename OP = LessThan<T>> static void TestSortSublists() { auto cpu_context = GetCpuContext(); auto cuda_context = GetCudaContext(); RaggedShape shape = 
RandomRaggedShape(false, // set_row_ids 2, // min_num_axes 4, // max_num_axes 1, // min_num_elements 2000); // max_num_elements Array1<T> values = RandUniformArray1<T>(shape.Context(), shape.NumElements(), -2000, 2000); Ragged<T> ragged(shape, values); ragged = ragged.To(cuda_context); values = values.To(cpu_context); // to be sorted by cpu Array1<T> unsorted = values.Clone(); Array1<int32_t> order(ragged.Context(), ragged.values.Dim()); SortSublists<T, OP>(&ragged, &order); Array1<int32_t> &segment = ragged.shape.RowSplits(ragged.NumAxes() - 1); CpuSortSublists<T, OP>(segment, &values); int32_t n = order.Dim(); for (int i = 0; i != n; ++i) { EXPECT_EQ(values[i], ragged.values[i]); EXPECT_EQ(ragged.values[i], unsorted[order[i]]); } } TEST(RaggedTest, Ragged) { TestRagged<int32_t>(); TestRagged<double>(); TestSortSublists<int32_t>(); TestSortSublists<double>(); } TEST(RaggedShapeOpsTest, TestAppend) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes[1] = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); shapes_ptr[1] = &shapes[1]; } { // axis == 1 RaggedShape result = Append(1, 2, shapes_ptr.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } } { // axis == 0 RaggedShape result = Append(0, 2, shapes_ptr.data()); // get result splits with `SpliceRowSplits` and get result row-ids with // `RowSplitsToRowIds`` std::vector<Array1<int32_t>> result_splits; std::vector<Array1<int32_t>> result_ids; for (auto i = 0; i < 2; ++i) { std::vector<const Array1<int32_t> *> splits_ptr = { &row_splits_vec[i][0], &row_splits_vec[i][1]}; Array1<int32_t> curr_row_splits = SpliceRowSplits(2, splits_ptr.data()); result_splits.push_back(curr_row_splits); Array1<int32_t> curr_row_ids(context, curr_row_splits.Back()); 
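// RowSplitsToRowIds() below fills curr_row_ids from the spliced splits so
// that element e of row r gets id r; the spliced splits plus these ids form
// the per-layer reference against which Append on axis 0 is checked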
RowSplitsToRowIds(curr_row_splits, &curr_row_ids);
          result_ids.push_back(curr_row_ids);
        }
        for (int32_t i = 0; i < 2; ++i) {
          CheckArrayData(result.RowSplits(i + 1), result_splits[i]);
          CheckArrayData(result.RowIds(i + 1), result_ids[i]);
        }
      }
    }
    {
      // test with random large size
      for (int32_t i = 0; i < 2; ++i) {
        int32_t num_shape = RandInt(2, 100);
        int32_t num_axes = RandInt(2, 4);
        std::vector<RaggedShape> shape_vec(num_shape);
        std::vector<RaggedShape *> shapes(num_shape);
        for (int32_t j = 0; j != num_shape; ++j) {
          shape_vec[j] =
              RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context);
          shapes[j] = &shape_vec[j];
        }
        // only test the axis == 0 case; testing axis == 1 with the simple
        // case above is enough, as Append on axis 1 just calls Stack
        RaggedShape result = Append(0, num_shape, shapes.data());
        ASSERT_EQ(result.NumAxes(), num_axes);
        // get result splits with `SpliceRowSplits` and get result row-ids
        // with `RowSplitsToRowIds`
        std::vector<Array1<int32_t>> result_splits;
        std::vector<Array1<int32_t>> result_ids;
        for (int32_t axis = 1; axis < num_axes; ++axis) {
          std::vector<Array1<int32_t>> splits_vec(num_shape);
          std::vector<const Array1<int32_t> *> splits_vec_ptr(num_shape);
          for (int32_t n = 0; n != num_shape; ++n) {
            splits_vec[n] = shape_vec[n].RowSplits(axis);
            splits_vec_ptr[n] = &splits_vec[n];
          }
          Array1<int32_t> curr_row_splits =
              SpliceRowSplits(num_shape, splits_vec_ptr.data());
          result_splits.push_back(curr_row_splits);
          Array1<int32_t> curr_row_ids(context, curr_row_splits.Back());
          RowSplitsToRowIds(curr_row_splits, &curr_row_ids);
          result_ids.push_back(curr_row_ids);
        }
        // check data
        for (int32_t axis = 1; axis < num_axes; ++axis) {
          CheckArrayData(result.RowSplits(axis), result_splits[axis - 1]);
          CheckArrayData(result.RowIds(axis), result_ids[axis - 1]);
        }
      }
    }
  }
}

template <typename T>
void TestAppendRagged() {
  ContextPtr cpu = GetCpuContext();  // will be used to copy data
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    // TODO(haowen): remove duplicate code in TestAppend above.
    // test with simple case should be good enough, as we have tested
    // Append(RaggedShape&) already.
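// For Append(0, ...) the result's values are simply the two value arrays
// concatenated in order ({1, 2, 5, ...} followed by {20, 21, 23, ...});
// for Append(1, ...) the values are interleaved top-level row by row,
// as the expected_data vectors below show.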
std::vector<Ragged<T>> ragged_vec(2); std::vector<Ragged<T> *> ragged(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<T> values_vec = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[0] = Ragged<T>(shape, values); ragged[0] = &ragged_vec[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_ids1 = {0, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; const std::vector<int32_t> row_ids2 = {0, 0, 0, 1, 2, 3, 3}; const std::vector<T> values_vec = {20, 21, 23, 28, 30, 32, 35}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); Array1<T> values(context, values_vec); ragged_vec[1] = Ragged<T>(shape, values); ragged[1] = &ragged_vec[1]; } { // axis == 0 Ragged<T> result = Append(0, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 1, 1, 1, 2, 3, 4, 4, 5}, {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 6, 7, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 7, 9, 10, 12, 14, 15, 18, 20, 21, 23, 28, 30, 32, 35}; CheckArrayData(result.values, expected_data); } { // axis == 1 Ragged<T> result = Append(1, 2, ragged.data()); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 8, 10}, {0, 2, 3, 6, 7, 9, 10, 11, 12, 15, 17}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 1, 1, 2, 2}, {0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 9, 9}}; for (int32_t i = 0; i < 2; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); CheckArrayData(result.RowIds(i + 1), expected_row_ids[i]); } std::vector<T> expected_data = {1, 2, 5, 20, 21, 23, 7, 9, 10, 12, 28, 30, 14, 15, 18, 32, 35}; CheckArrayData(result.values, expected_data); } } } TEST(RaggedTest, TestAppendRagged) { TestAppendRagged<int32_t>(); TestAppendRagged<double>(); } void CheckResultOfIndex(const ContextPtr &context, RaggedShape shape, Array1<int32_t> new2old, RaggedShape result) { K2_CHECK(context->IsCompatible(*shape.Context())); ContextPtr cpu = GetCpuContext(); // will use to copy data int32_t num_axes = shape.NumAxes(); int32_t src_dim0 = shape.Dim0(), result_dim0 = result.Dim0(); EXPECT_EQ(result_dim0, new2old.Dim()); result.Check(); for (int32_t i = 0; i < result_dim0; i++) { RaggedShape result_part = Arange(result, 0, i, i + 1); if (new2old[i] == -1) { K2_CHECK_EQ(0, result_part.TotSize(1)); } else { RaggedShape src_part = Arange(shape, 0, new2old[i], new2old[i] + 1); K2_CHECK_EQ(true, Equal(src_part, result_part)); } } } 
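// A minimal sketch added for illustration (not part of the original suite):
// it exercises Index() on axis 0 with string-constructed inputs, mirroring
// TestIndexAxis1 below, and assumes the Index(ragged, axis, indexes)
// overload used there also accepts axis == 0 with the same "gather rows"
// semantics.
TEST(RaggedShapeOpsTest, TestIndexAxis0SimpleSketch) {
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    Ragged<int32_t> input =
        Ragged<int32_t>(" [ [ 1 2 ] [ 3 4 5 ] [ ] ]").To(context);
    // pick row 2 (which is empty) and then row 0
    Array1<int32_t> indexes = Array1<int32_t>(" [ 2 0 ]").To(context);
    Ragged<int32_t> expected =
        Ragged<int32_t>(" [ [ ] [ 1 2 ] ]").To(context);
    Ragged<int32_t> indexed = Index(input, 0, indexes);
    EXPECT_EQ(Equal(expected, indexed), true);
  }
}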
TEST(RaggedShapeOpsTest, TestIndex) { for (int i = 0; i < 5; i++) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> ids1(context, row_ids1); Array1<int32_t> splits2(context, row_splits2); Array1<int32_t> ids2(context, row_ids2); RaggedShape shape = RaggedShape3(&splits1, &ids1, ids1.Dim(), &splits2, &ids2, ids2.Dim()); std::vector<int32_t> new2old_vec = {2, 1}; Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes_out; RaggedShape result = Index(shape, 0, new2old, &value_indexes_out); // fsa 2, state_idx01 {5}, arc_idx012 {7, 8, 9} // fsa 1, state_idx01 {2, 3, 4}, arc_idx012 {{3},{4, 5}, {6}} CheckArrayData(value_indexes_out, std::vector<int32_t>{7, 8, 9, 3, 4, 5, 6}); CheckResultOfIndex(context, shape, new2old, result); } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(), result_dim0 = RandInt(0, 10); if (dim0 == 0) result_dim0 = 0; std::vector<int32_t> new2old_vec(result_dim0); for (int i = 0; i < result_dim0; i++) new2old_vec[i] = RandInt(-1, dim0 - 1); Array1<int32_t> new2old(context, new2old_vec); Array1<int32_t> value_indexes; RaggedShape result = Index(shape, 0, new2old, &value_indexes); CheckResultOfIndex(context, shape, new2old, result); K2_LOG(INFO) << "Value_indexes = " << value_indexes; } } } } } TEST(RaggedShapeOpsTest, TestIndexAxis1) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Ragged<int32_t> input = Ragged<int32_t>(" [ [ 1 2 ] [ 3 4 5 ] [ 6 7 ] [ ] ]").To(context); // NOLINT Array1<int32_t> indexes = Array1<int32_t>(" [ 1 0 4 2 6 5 ]").To(context); Ragged<int32_t> output = Ragged<int32_t>(" [ [ 2 1 ] [ 5 3 ] [ 7 6 ] [ ] ]").To(context); // NOLINT Ragged<int32_t> indexed = Index(input, 1, indexes); EXPECT_EQ(Equal(output, indexed), true); } } } TEST(GetTransposeReordering, NoDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 e4 // row2 f5 // row3 g6 h7 i8 // row4 j9 // row5 k10 l11 std::vector<int32_t> col_indexes{4, 5, 0, 1, 5, 3, 0, 2, 4, 5, 1, 4}; std::vector<int32_t> _row_splits{0, 2, 5, 6, 9, 10, 12}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 6, 3, 10, 7, 5, 0, 8, 11, 1, 4, 9}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, ThreeAxesEmptyCase) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> ragged("[ [ [ ] ] ]"); ragged = ragged.To(context); Array1<int32_t> order = GetTransposeReordering(ragged, 0); } } TEST(GetTransposeReordering, NoDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0 b1 // row1 c2 d3 // row2 e4 // row3 f5 g6 h7 // row4 i8 // row5 j9 k10 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, 
std::vector<int32_t>{1, 3, 0, 2, 1, 0, 1, 3, 5, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 2, 4, 5, 8, 9, 11}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData(order, {2, 5, 0, 4, 6, 3, 1, 7, 9, 8, 10}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicates) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 std::vector<int32_t> col_indexes{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 3, 5}; std::vector<int32_t> _row_splits{0, 5, 8, 9, 13, 16, 18}; for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits(context, _row_splits); RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1); Array1<int32_t> values(context, col_indexes); Ragged<int32_t> ragged(shape, values); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 16, 13, 14, 15, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, WithDuplicatesThreeAxes) { // col0 col1 col2 col3 col4 col5 // row0 a0,a1 b2,b3,b4 // row1 c5,c6 d7 // row2 e8 // row3 f9 g10,g11 h12 // row4 i13,i14,i15 // row5 j16 k17 for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> col_indexes( context, std::vector<int32_t>{1, 1, 3, 3, 3, 0, 0, 2, 1, 0, 1, 1, 3, 4, 4, 4, 4, 5}); Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 4, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 5, 8, 9, 13, 16, 18}); RaggedShape shape = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); Ragged<int32_t> ragged(shape, col_indexes); Array1<int32_t> order = GetTransposeReordering(ragged, 6); CheckArrayData( order, {5, 6, 9, 0, 1, 8, 10, 11, 7, 2, 3, 4, 12, 13, 14, 15, 16, 17}); EXPECT_TRUE(context->IsCompatible(*order.Context())); } } TEST(GetTransposeReordering, RandomFsaVecTest) { for (int32_t iter = 0; iter != 8; ++iter) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { int n = RandInt(100, 200); int32_t min_num_fsas = n; int32_t max_num_fsas = n * 2; bool acyclic = false; int32_t max_symbol = 100; int32_t min_num_arcs = min_num_fsas * 10; int32_t max_num_arcs = max_num_fsas * 20; FsaVec fsas = RandomFsaVec(min_num_fsas, max_num_fsas, acyclic, max_symbol, min_num_arcs, max_num_arcs); fsas = fsas.To(context); Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_states = fsas.TotSize(1); int32_t num_arcs = fsas.TotSize(2); Array1<int32_t> order = GetTransposeReordering(dest_states_tensor, num_states); Sort(&order); ASSERT_EQ(order.Dim(), num_arcs); Array1<int32_t> expected = Range<int32_t>(context, num_arcs, 0); CheckArrayData(order, expected); } } } TEST(ChangeSublistSize, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9}); size_delta = -2; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), 
std::vector<int32_t>{0, 0, 1}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5}); } } TEST(ChangeSublistSizePinned, TwoAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5, 5}); RaggedShape src = RaggedShape2(&row_splits1, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 4, 9, 9}); size_delta = -3; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 0, 0, 0}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(1), std::vector<int32_t>{0, 2, 5, 5}); } } } TEST(ChangeSublistSize, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 5}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 21}); // it is an error to use -2 here // because the state (state_idx01 == 2) has only 1 entry size_delta = -1; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 2, 3, 3, 4, 6}); size_delta = 0; dst = ChangeSublistSize(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 11}); } } TEST(ChangeSublistSizePinned, ThreeAxes) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { /* [ [ [x, x, x], [x, x] ] [ [x], [x, x], [], [x, x, x] ] ] */ Array1<int32_t> row_splits1(context, std::vector<int32_t>{0, 2, 6}); Array1<int32_t> row_splits2(context, std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); RaggedShape src = RaggedShape3(&row_splits1, nullptr, -1, &row_splits2, nullptr, -1); int32_t size_delta = 2; RaggedShape dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 5, 9, 12, 16, 16, 21}); size_delta = -2; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 1, 1, 1, 1, 1, 2}); size_delta = 0; dst = ChangeSublistSizePinned(src, size_delta); CheckArrayData(dst.RowSplits(2), std::vector<int32_t>{0, 3, 5, 6, 8, 8, 11}); } } TEST(RaggedShapeOpsTest, TestGetCountsPartitioned) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // Testing with simple case is good enough as we have tested GetCounts() // with random large size and GetCountsPartitioned just calls GetCounts. 
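// Worked example for the data below: the values {0, 1, 0, 2, 5, 5, 7, 7,
// 9, 7} occur with counts {2, 1, 1, 0, 0, 2, 0, 3, 0, 1} for bins 0..9;
// ans_shape merely partitions those ten counts into rows of sizes
// {2, 2, 3, 3}.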
std::vector<int32_t> src_row_splits_vec = {0, 3, 4, 6, 10}; Array1<int32_t> src_row_splits(context, src_row_splits_vec); RaggedShape src_shape = RaggedShape2(&src_row_splits, nullptr, -1); std::vector<int32_t> src_values_vec = {0, 1, 0, 2, 5, 5, 7, 7, 9, 7}; Array1<int32_t> src_values(context, src_values_vec); Ragged<int32_t> src(src_shape, src_values); std::vector<int32_t> ans_row_splits_vec = {0, 2, 4, 7, 10}; Array1<int32_t> ans_row_splits(context, ans_row_splits_vec); RaggedShape ans_shape = RaggedShape2(&ans_row_splits, nullptr, -1); Ragged<int32_t> result = GetCountsPartitioned(src, ans_shape); ASSERT_EQ(result.NumAxes(), 2); // Check row_splits Array1<int32_t> row_splits = result.shape.RowSplits(1).To(cpu); std::vector<int32_t> result_row_splits( row_splits.Data(), row_splits.Data() + row_splits.Dim()); EXPECT_EQ(result_row_splits, ans_row_splits_vec); // check values std::vector<int32_t> expected_data = {2, 1, 1, 0, 0, 2, 0, 3, 0, 1}; Array1<int32_t> values = result.values.To(cpu); std::vector<int32_t> data(values.Data(), values.Data() + values.Dim()); EXPECT_EQ(data, expected_data); } } TEST(RaggedShapeOpsTest, TestStack) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case std::vector<RaggedShape> shapes(2); std::vector<RaggedShape *> shapes_ptr(2); std::vector<std::vector<Array1<int32_t>>> row_splits_vec(2); { const std::vector<int32_t> row_splits1 = {0, 2, 5, 6}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[0] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[0] = &shapes[0]; } { const std::vector<int32_t> row_splits1 = {0, 1, 3, 4}; const std::vector<int32_t> row_splits2 = {0, 3, 4, 5, 7}; Array1<int32_t> splits1(context, row_splits1); Array1<int32_t> splits2(context, row_splits2); row_splits_vec[0].push_back(splits1); row_splits_vec[1].push_back(splits2); shapes[1] = RaggedShape3(&splits1, nullptr, -1, &splits2, nullptr, -1); shapes_ptr[1] = &shapes[1]; } std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6}, {0, 2, 5, 6, 7, 9, 10}, {0, 2, 3, 4, 6, 7, 10, 13, 14, 15, 17}}; { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(result.RowSplits(i + 1), expected_row_splits[i]); } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, 2, shapes_ptr.data()); RaggedShape transpose = Transpose(result); for (int32_t i = 0; i != 3; ++i) { CheckArrayData(transpose.RowSplits(i + 1), expected_row_splits[i]); } } } { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, 
-1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); shape_vec[j] = new_shape; shapes[j] = &shape_vec[j]; } std::vector<RaggedShape> cpu_shapes(num_shape); for (auto i = 0; i != num_shape; ++i) { cpu_shapes[i] = shape_vec[i].To(cpu); } { // axis == 0 int32_t axis = 0; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } { // axis == 1 int32_t axis = 1; RaggedShape result = Stack(axis, num_shape, shapes.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); for (auto iter = result.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); int32_t t = result[index]; // don't need the value, just make sure // it's a valid index. int32_t i = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] i = cpu_shapes[i][index]; // don't need the value, just need to // make sure it's an allowable index. } } } } } } template <typename T> void TestStackRagged() { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { // test with random large size for (int32_t m = 0; m < 2; ++m) { int32_t num_shape = RandInt(2, 100); int32_t num_axes = RandInt(2, 4); int32_t dim0 = RandInt(1, 100); std::vector<Ragged<T>> ragged_vec(num_shape); std::vector<Ragged<T> *> ragged(num_shape); for (int32_t j = 0; j != num_shape; ++j) { RaggedShape shape = RandomRaggedShape(false, num_axes, num_axes, 0, 1000).To(context); int32_t src_dim0 = shape.Dim0(); std::vector<int32_t> row_splits_vec(dim0 + 1); row_splits_vec[0] = 0; for (int32_t n = 1; n < dim0; ++n) { row_splits_vec[n] = RandInt(0, src_dim0); } row_splits_vec[dim0] = src_dim0; std::sort(row_splits_vec.begin(), row_splits_vec.end()); Array1<int32_t> row_splits(context, row_splits_vec); RaggedShape first = RaggedShape2(&row_splits, nullptr, -1); RaggedShape new_shape = ComposeRaggedShapes(first, shape); int32_t num_elems = new_shape.NumElements(); Array1<T> src_values = RandUniformArray1<T>(context, num_elems, 0, 10000); ragged_vec[j] = Ragged<T>(new_shape, src_values); ragged[j] = &ragged_vec[j]; } std::vector<Ragged<T>> cpu_ragged_vec(num_shape); for (auto j = 0; j != num_shape; ++j) { cpu_ragged_vec[j] = ragged_vec[j].To(cpu); } { // axis == 0 int32_t axis = 0; Ragged<T> result = Stack(axis, num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), num_shape); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t i = index[0]; index.erase(index.begin()); // result[i,j,k,l] = (shape[i])[j,k,l] EXPECT_EQ(value, cpu_ragged_vec[i][index]); } } { // axis == 1 int32_t axis = 1; Ragged<T> result = Stack(axis, 
num_shape, ragged.data()); ASSERT_EQ(result.NumAxes(), num_axes + 2); // note we append one axis in each shape in // `shapes` before `Stack` ASSERT_EQ(result.Dim0(), dim0); result = result.To(cpu); RaggedShape &shape = result.shape; for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { std::vector<int32_t> index = iter.Value(); T value = result[index]; int32_t j = index[1]; index.erase(index.begin() + 1); // result[i,j,k,l] = (shape[j])[i,k,l] EXPECT_EQ(value, cpu_ragged_vec[j][index]); } } } } } TEST(RaggedTest, TestStackRagged) { TestStackRagged<int32_t>(); TestStackRagged<double>(); } TEST(RaggedTest, TestMaxSize) { for (int32_t i = 0; i <= 10; i++) { ContextPtr c = (i % 2 == 0 ? GetCpuContext() : GetCudaContext()); int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(c); int32_t axis = RandInt(1, num_axes - 1); int32_t max_size = shape.MaxSize(axis); if (axis == 0) { K2_CHECK(max_size == shape.Dim0()); } else { Array1<int32_t> row_splits = shape.RowSplits(axis).To(GetCpuContext()); int32_t *row_splits_data = row_splits.Data(); int32_t m = 0; for (int32_t i = 0; i + 1 < row_splits.Dim(); i++) { int32_t size = row_splits_data[i + 1] - row_splits_data[i]; if (size > m) m = size; } ASSERT_EQ(m, max_size); } } } TEST(RaggedShapeOpsTest, TestMakeTransposable) { ContextPtr cpu = GetCpuContext(); // will be used to copy data for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 1, 1, 2, 3, 3}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, // 6, 7}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<std::vector<int32_t>> expected_row_splits = { {0, 3, 6, 9, 12}, {0, 2, 3, 3, 4, 6, 7, 10, 10, 10, 12, 13, 13}}; std::vector<std::vector<int32_t>> expected_row_ids = { {0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3}, {0, 0, 1, 3, 4, 4, 5, 6, 6, 6, 9, 9, 10}}; RaggedShape result = MakeTransposable(shape); for (int32_t i = 1; i != 3; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); CheckArrayData(result.RowIds(i), expected_row_ids[i - 1]); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { int32_t num_axes = RandInt(2, 4); RaggedShape shape = RandomRaggedShape(true, num_axes, num_axes, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t max_size = shape.MaxSize(1); RaggedShape result = MakeTransposable(shape); shape = shape.To(cpu); result = result.To(cpu); EXPECT_EQ(result.Dim0(), dim0); EXPECT_EQ(result.TotSize(1), dim0 * max_size); // check if every sub list in axis 1 has the same size int32_t *row_splits1 = result.RowSplits(1).Data(); for (int32_t j = 0; j != dim0 + 1; ++j) { EXPECT_EQ(row_splits1[j], j * max_size); } if (num_axes > 2) { for (auto iter = shape.Iterator(); !iter.Done(); iter.Next()) { const std::vector<int32_t> &index = iter.Value(); EXPECT_EQ(shape[index], result[index]); } } } } } } TEST(RaggedShapeOpsTest, PrefixTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 5, 6, 8}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 4, 6, 7, 10, 12, 13}; Array1<int32_t> row_splits1_array(context, 
row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 4); EXPECT_EQ(num_axes, 3); { // n == 0 int32_t n = 0; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n > 0 && n < dim0 int32_t n = 2; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } } { // n == dim0 int32_t n = 4; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 5}, {0, 2, 3, 4, 6, 7}}; RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); CheckArrayData(result.RowSplits(1), row_splits1); CheckArrayData(result.RowSplits(2), row_splits2); } } { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t n = RandInt(0, dim0); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); // just check row_splits1 here would be fine, as we have tested it with // simple case. We just confirm it can run successfully with kinds of // different random shapes. 
CheckArrayData(result.RowSplits(1), shape.RowSplits(1).Range(0, n + 1)); } } } } TEST(RaggedShapeOpsTest, GetPrefixesTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // test with random large size for (int32_t i = 0; i < 2; ++i) { RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); int32_t ans_num = RandInt(0, 10); std::vector<int32_t> sizes; for (int32_t j = 0; j != ans_num; ++j) sizes.push_back(RandInt(0, dim0)); ASSERT_EQ(sizes.size(), ans_num); std::vector<RaggedShape> ans = GetPrefixes(shape, sizes); ASSERT_EQ(ans.size(), ans_num); for (int32_t j = 0; j != ans_num; ++j) { int32_t n = sizes[j]; RaggedShape ans_j = ans[j]; EXPECT_TRUE(IsCompatible(shape, ans_j)); EXPECT_EQ(ans_j.Dim0(), n); EXPECT_EQ(ans_j.NumAxes(), num_axes); RaggedShape result = Prefix(shape, n); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), n); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t m = 1; m != num_axes; ++m) { EXPECT_TRUE(Equal(result.RowSplits(m), ans_j.RowSplits(m))); } } } } } } TEST(RaggedShapeOpsTest, ArangeTest) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { { // simple case const std::vector<int32_t> row_splits1 = {0, 2, 3, 4, 6, 7, 10}; // const std::vector<int32_t> row_ids1 = {0, 0, 1, 2, 3, 3, 4, 5, 5, 5}; const std::vector<int32_t> row_splits2 = {0, 2, 3, 5, 8, 9, 12, 13, 15, 15, 16}; // const std::vector<int32_t> row_ids2 = {0, 0, 1, 2, 2, 3, 3, 3, // 4, 5, 5, 5, 6, 7, 7, 9}; Array1<int32_t> row_splits1_array(context, row_splits1); Array1<int32_t> row_splits2_array(context, row_splits2); RaggedShape shape = RaggedShape3(&row_splits1_array, nullptr, -1, &row_splits2_array, nullptr, -1); std::vector<int32_t> values(shape.NumElements()); std::iota(values.begin(), values.end(), 10); Array1<int32_t> values_array(context, values); Ragged<int32_t> ragged(shape, values_array); int32_t dim0 = shape.Dim0(); int32_t num_axes = shape.NumAxes(); EXPECT_EQ(dim0, 6); EXPECT_EQ(num_axes, 3); { // axis == 0, begin == end int32_t axis = 0; int32_t begin = 1, end = 1; std::vector<std::vector<int32_t>> expected_row_splits = {{0}, {0}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.Dim0(), 0); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {1, 1}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end); EXPECT_EQ(ragged_result.values.Dim(), 0); } { // axis == 0, begin < end == Dim0() + 1 int32_t axis = 0; int32_t begin = 3, end = 6; std::vector<std::vector<int32_t>> expected_row_splits = { {0, 2, 3, 6}, {0, 1, 4, 5, 7, 7, 8}}; std::pair<int32_t, int32_t> value_range; RaggedShape result = Arange(shape, axis, begin, end, &value_range); EXPECT_TRUE(IsCompatible(shape, result)); EXPECT_EQ(result.NumAxes(), num_axes); for (int32_t i = 1; i != num_axes; ++i) { CheckArrayData(result.RowSplits(i), expected_row_splits[i - 1]); } std::pair<int32_t, int32_t> expected_value_range = {8, 16}; EXPECT_EQ(value_range, expected_value_range); EXPECT_EQ(result.NumElements(), value_range.second - value_range.first); // test `Arange` for ragged array 
          Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end);
          std::vector<int32_t> expected_values = {18, 19, 20, 21, 22, 23, 24, 25};
          CheckArrayData(ragged_result.values, expected_values);
        }
        {
          // axis == 1
          int32_t axis = 1;
          int32_t begin = 6, end = 8;
          std::vector<int32_t> expected_row_splits = {0, 1, 3};
          std::pair<int32_t, int32_t> value_range;
          RaggedShape result = Arange(shape, axis, begin, end, &value_range);
          EXPECT_TRUE(IsCompatible(shape, result));
          EXPECT_EQ(result.NumAxes(), 2);
          CheckArrayData(result.RowSplits(1), expected_row_splits);
          std::pair<int32_t, int32_t> expected_value_range = {12, 15};
          EXPECT_EQ(value_range, expected_value_range);
          EXPECT_EQ(result.NumElements(), value_range.second - value_range.first);
          // test `Arange` for ragged array
          Ragged<int32_t> ragged_result = Arange(ragged, axis, begin, end);
          std::vector<int32_t> expected_values = {22, 23, 24};
          CheckArrayData(ragged_result.values, expected_values);
        }
      }
      {
        // test with random large size
        for (int32_t i = 0; i < 2; ++i) {
          RaggedShape shape = RandomRaggedShape(false, 2, 4, 0, 1000).To(context);
          int32_t num_axes = shape.NumAxes();
          int32_t axis = RandInt(0, num_axes - 2);
          int32_t tot_size = shape.TotSize(axis);
          int32_t begin = RandInt(0, tot_size);
          int32_t end = RandInt(begin, tot_size);
          std::pair<int32_t, int32_t> value_range;
          RaggedShape result = Arange(shape, axis, begin, end, &value_range);
          EXPECT_TRUE(IsCompatible(shape, result));
          EXPECT_EQ(result.Dim0(), std::max(0, end - begin));
          EXPECT_EQ(result.NumAxes(), num_axes - axis);
          // Just checking row_splits1 here is fine, as the simple case above
          // tests the values; here we only confirm it runs successfully on all
          // kinds of different random shapes.
          if (begin == end) {
            CheckArrayData(result.RowSplits(1), std::vector<int32_t>{0});
          } else {
            Array1<int32_t> row_splits1 =
                shape.RowSplits(axis + 1).Arange(begin, end + 1);
            row_splits1 = Minus(row_splits1, row_splits1[0]);
            CheckArrayData(result.RowSplits(1), row_splits1);
          }
          EXPECT_EQ(result.NumElements(), value_range.second - value_range.first);
        }
      }
    }
  }

TEST(RaggedShapeOpsTest, AppendMoreAxes) {
  for (auto &c : {GetCpuContext(), GetCudaContext()}) {
    RaggedShape shape1 =
                    RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c),
                shape2 =
                    RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c),
                shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c);
    RaggedShape appended_axis2_ref =
        RaggedShape("[ [ [[ x x ][ x ][]] [[x ][x][ x ]] ] [[[x ][ x x][]]]]")
            .To(c);
    RaggedShape appended_axis3_ref =
        RaggedShape("[ [ [[ x x x ]] [[x x x ]] ] [[[x x x]]]]").To(c);
    RaggedShape *srcs[] = {&shape1, &shape2, &shape3};
    Array1<uint32_t> merge_map2;
    Array1<uint32_t> merge_map3;
    RaggedShape appended_axis2 = Append(2, 3, srcs, &merge_map2);
    RaggedShape appended_axis3 = Append(3, 3, srcs, &merge_map3);
    K2_LOG(INFO) << "appended_axis2 = " << appended_axis2;
    K2_LOG(INFO) << "appended_axis3 = " << appended_axis3;
    K2_CHECK(Equal(appended_axis2, appended_axis2_ref));
    K2_CHECK(Equal(appended_axis3, appended_axis3_ref));
    std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10};
    CheckArrayData(merge_map2, merge_values);
    CheckArrayData(merge_map3, merge_values);
  }
}

TEST(RaggedShapeOpsTest, StackMoreAxes) {
  for (auto &c : {GetCpuContext(), GetCudaContext()}) {
    RaggedShape shape1 =
                    RaggedShape("[ [ [ [ x x ] ] [ [x ] ] ] [[[x]]]]").To(c),
                shape2 =
                    RaggedShape("[ [ [ [x ] ] [ [x ] ] ] [[[x x]]]]").To(c),
                shape3 = RaggedShape("[ [ [ [ ] ] [ [ x ] ] ] [[[]]]]").To(c);
    RaggedShape stacked_ref = RaggedShape(
        "[ [ [[[ x x ]][[ x ]][[]]] [[[x ]][[x]][[ x ]]] ] "
        "[[[[x ]][[ x x]][[]]]]]")
            .To(c);
    RaggedShape *srcs[] = {&shape1, &shape2, &shape3};
    Array1<uint32_t> merge_map2;
    Array1<uint32_t> merge_map3;
    RaggedShape stacked_axis2 = Stack(2, 3, srcs, &merge_map2);
    RaggedShape stacked_axis3 = Stack(3, 3, srcs, &merge_map3);
    K2_LOG(INFO) << "stacked_axis2 = " << stacked_axis2;
    K2_LOG(INFO) << "stacked_axis3 = " << stacked_axis3;
    K2_CHECK(Equal(stacked_axis2, stacked_ref));
    K2_CHECK(Equal(stacked_axis2, stacked_ref));
    std::vector<uint32_t> merge_values = {0, 3, 1, 6, 4, 2, 9, 7, 10};
    CheckArrayData(merge_map2, merge_values);
    CheckArrayData(merge_map3, merge_values);
  }
}

TEST(RaggedShapeOpsTest, Merge) {
  for (auto &c : {GetCpuContext(), GetCudaContext()}) {
    RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]")
                             .To(c),  // m: 0 3 6, m_out: 0 3, 6,
                shape2 = RaggedShape("[ [ x] [ x x x ] ]")
                             .To(c),  // m: 1 4, m_out: 1, 4 7 10
                shape3 = RaggedShape("[ [ ] [ x x ] [] ]")
                             .To(c);  // m: 2 5 8, m_out: ,2 5,
    RaggedShape ans_ref =
        RaggedShape("[ [] [x] [x x x] [] [] [x x] [x x] [x] ]").To(c);
    // This is a mixed-up kind of merge map that doesn't appear naturally (they
    // are always in-order from each source, right now) but it should still
    // work.
    std::vector<uint32_t> merge_map_data = {6, 1, 4, 8, 2, 5, 0, 3};
    Array1<uint32_t> merge_map_in(c, merge_map_data);
    RaggedShape *srcs[] = {&shape1, &shape2, &shape3};
    Array1<uint32_t> merge_map_out;
    RaggedShape merged = Merge(3, srcs, merge_map_in, &merge_map_out);
    ASSERT_EQ(true, Equal(ans_ref, merged));
    std::vector<uint32_t> merge_map_out_data = {1, 4, 7, 10, 2, 5, 0, 3, 6};
    CheckArrayData(merge_map_out, merge_map_out_data);
  }
}

TEST(RaggedTest, AddSuffixToRaggedTest) {
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    {
      // test with random large size
      for (int32_t i = 0; i < 10; ++i) {
        Ragged<int32_t> src = RandomRagged<int32_t>().To(context);
        int32_t num_axes = src.NumAxes();
        Array1<int32_t> suffix = RandUniformArray1<int32_t>(
            context, src.TotSize(num_axes - 2), 0, 100);
        Ragged<int32_t> dst = AddSuffixToRagged(src, suffix);
        EXPECT_EQ(dst.NumAxes(), num_axes);
        EXPECT_EQ(dst.NumElements(), src.NumElements() + suffix.Dim());
        Ragged<int32_t> src_cpu = src.To(GetCpuContext());
        Ragged<int32_t> dst_cpu = dst.To(GetCpuContext());
        for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator();
             !src_iter.Done(); src_iter.Next()) {
          const std::vector<int32_t> &src_indexes = src_iter.Value();
          EXPECT_EQ(dst_cpu[src_indexes], src_cpu[src_indexes]);
        }
        Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1);
        Array1<int32_t> suffix_cpu = suffix.To(GetCpuContext());
        for (int32_t i = 0; i < suffix.Dim(); ++i) {
          EXPECT_EQ(dst_cpu.values[src_row_splits[i + 1] + i], suffix_cpu[i]);
        }
      }
    }
  }
}

TEST(RaggedTest, AddPrefixToRaggedTest) {
  for (auto &context : {GetCpuContext(), GetCudaContext()}) {
    {
      // test with random large size
      for (int32_t i = 0; i < 10; ++i) {
        Ragged<int32_t> src = RandomRagged<int32_t>().To(context);
        int32_t num_axes = src.NumAxes();
        Array1<int32_t> prefix = RandUniformArray1<int32_t>(
            context, src.TotSize(num_axes - 2), 0, 100);
        Ragged<int32_t> dst = AddPrefixToRagged(src, prefix);
        EXPECT_EQ(dst.NumAxes(), num_axes);
        EXPECT_EQ(dst.NumElements(), src.NumElements() + prefix.Dim());
        Ragged<int32_t> src_cpu = src.To(GetCpuContext());
        Ragged<int32_t> dst_cpu = dst.To(GetCpuContext());
        for (RaggedShapeIndexIterator src_iter = src_cpu.shape.Iterator();
             !src_iter.Done(); src_iter.Next()) {
          const std::vector<int32_t> &src_indexes = src_iter.Value();
          std::vector<int32_t>
dst_indexes(src_indexes); dst_indexes.back() += 1; // increase the last index by 1 EXPECT_EQ(dst_cpu[dst_indexes], src_cpu[src_indexes]); } Array1<int32_t> src_row_splits = src_cpu.RowSplits(num_axes - 1); Array1<int32_t> prefix_cpu = prefix.To(GetCpuContext()); for (int32_t i = 0; i < prefix.Dim(); ++i) { EXPECT_EQ(dst_cpu.values[src_row_splits[i] + i], prefix_cpu[i]); } } } } } TEST(RaggedTest, RemoveValuesLeq) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 3 4 ] [ 5 7 8 ] ]").To(c), s3 = Ragged<int32_t>(" [ [4] [5 7 8]]").To(c), s5 = Ragged<int32_t>(" [ [] [ 7 8]]").To(c); Ragged<int32_t> ans1 = RemoveValuesLeq(r, 3), ans2 = RemoveValuesLeq(r, 5); K2_LOG(INFO) << "ans2 = " << ans2; EXPECT_EQ(true, Equal(ans1, s3)); EXPECT_EQ(true, Equal(ans2, s5)); } } TEST(RaggedTest, IndexArrayRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Array1<float> f(c, std::vector<float>({0.0, 1.0, 2.0, 3.0, 4.0})); Ragged<float> fr = Ragged<float>(" [ [ 2.0 0.0 ] [ 1.0 2.0 3.0 ] ]").To(c), ans = Index(f, r); EXPECT_EQ(true, Equal(ans, fr)); } } TEST(RaggedTest, IndexRaggedRagged) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> r = Ragged<int32_t>(" [ [ 2 0 ] [ 1 2 3 ] ]").To(c); Ragged<int32_t> s = Ragged<int32_t>(" [ [ 10 10 ] [ 11 ] [ 12 12 ] [ 13 ] [ 14 14] ]") .To(c); // NOLINT Ragged<int32_t> sr1 = Ragged<int32_t>(" [ [ [12 12] [10 10] ] [ [11] [12 12] [13] ] ]") .To(c); // NOLINT Ragged<int32_t> sr2 = Ragged<int32_t>(" [ [ 12 12 10 10 ] [ 11 12 12 13 ] ]") .To(c); // NOLINT EXPECT_EQ(true, Equal(Index(s, r, false), sr1)); EXPECT_EQ(true, Equal(Index(s, r, true), sr2)); } } TEST(RaggedShapeOpsTest, CoveringShape) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case RaggedShape shape1 = RaggedShape("[ [ x x ] [] [ x ] ]").To(c), shape2 = RaggedShape("[ [ x] [] [ x x x ] ]").To(c), shape3 = RaggedShape("[ [] [] [ x x ] ]").To(c); RaggedShape expected = RaggedShape("[ [x x] [] [x x x] ]").To(c); RaggedShape *srcs[] = {&shape1, &shape2, &shape3}; RaggedShape ans = CoveringShape(3, srcs); EXPECT_TRUE(Equal(expected, ans)); // test CoveringShapeForwardMap { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2, -1, -1}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape2, ans); std::vector<int32_t> expected_map = {0, -1, 1, 2, 3}; CheckArrayData(elem_map, expected_map); } { Array1<int32_t> elem_map = CoveringShapeForwardMap(shape3, ans); std::vector<int32_t> expected_map = {-1, -1, 0, 1, -1}; CheckArrayData(elem_map, expected_map); } } { // another simple case: only one src RaggedShape shape1 = RaggedShape("[ [ x x ] [ x ] [] ]").To(c); RaggedShape *srcs[] = {&shape1}; RaggedShape ans = CoveringShape(1, srcs); EXPECT_TRUE(Equal(shape1, ans)); // test CoveringShapeForwardMap Array1<int32_t> elem_map = CoveringShapeForwardMap(shape1, ans); std::vector<int32_t> expected_map = {0, 1, 2}; CheckArrayData(elem_map, expected_map); } { // random case for (int32_t i = 0; i != 1; ++i) { int32_t num_shape = RandInt(1, 100); int32_t dim0 = RandInt(1, 1000); std::vector<RaggedShape> shape_vec(num_shape); std::vector<RaggedShape *> shapes(num_shape); for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> row_sizes = RandUniformArray1<int32_t>(c, dim0 + 1, 0, 100); ExclusiveSum(row_sizes, &row_sizes); shape_vec[j] = RaggedShape2(&row_sizes, 
nullptr, -1); ASSERT_TRUE(shape_vec[j].Context()->IsCompatible(*c)); ASSERT_EQ(shape_vec[j].Dim0(), dim0); shapes[j] = &shape_vec[j]; } RaggedShape ans = CoveringShape(num_shape, shapes.data()); std::vector<Array1<int32_t>> elem_map(num_shape); for (int32_t j = 0; j != num_shape; ++j) { elem_map[j] = CoveringShapeForwardMap(shape_vec[j], ans); } // check ans ASSERT_EQ(ans.NumAxes(), 2); ASSERT_EQ(ans.Dim0(), dim0); ASSERT_TRUE(ans.Context()->IsCompatible(*c)); ContextPtr cpu = GetCpuContext(); ans = ans.To(cpu); for (int32_t j = 0; j != num_shape; ++j) shape_vec[j] = shape_vec[j].To(cpu); for (int32_t d = 0; d != dim0; ++d) { int32_t max_row_size = 0; for (int32_t j = 0; j != num_shape; ++j) max_row_size = std::max( shape_vec[j].RowSplits(1)[d + 1] - shape_vec[j].RowSplits(1)[d], max_row_size); EXPECT_EQ(max_row_size, ans.RowSplits(1)[d + 1] - ans.RowSplits(1)[d]); } // test CoveringShapeForwardMap for (int32_t j = 0; j != num_shape; ++j) { Array1<int32_t> cur_elem_map = elem_map[j].To(cpu); ASSERT_EQ(cur_elem_map.Dim(), ans.NumElements()); int32_t n = 0; for (RaggedShapeIndexIterator ans_iter = ans.Iterator(); !ans_iter.Done(); ans_iter.Next()) { const std::vector<int32_t> &ans_indexes = ans_iter.Value(); int32_t src_shape_linear_index = cur_elem_map[n]; if (src_shape_linear_index != -1) { EXPECT_EQ(src_shape_linear_index, shape_vec[j][ans_indexes]); } ++n; } } } } } } TEST(RaggedShapeOpsTest, RaggedShapeAxis0Splitter) { for (int32_t i = 0; i < 20; i++) { for (auto &context : {GetCpuContext(), GetCudaContext()}) { RaggedShape random = RandomRaggedShape(false, 3, 6, 0, 2000); int32_t dim0 = random.Dim0(); RaggedShapeAxis0Splitter splitter(random); for (int32_t i = 0; i < dim0; i++) { int32_t offset, offset2, offset3; RaggedShape sub_shape1 = random.Index(0, i, &offset), sub_shape2 = splitter.GetElement(i, &offset2); offset3 = splitter.GetOffset(i, random.NumAxes() - 1); EXPECT_EQ(offset, offset2); EXPECT_EQ(offset, offset3); EXPECT_EQ(Equal(sub_shape1, sub_shape2), true); } } } } template <typename T> static void TestSegmentedExclusiveSum() { for (auto &c : {GetCpuContext(), GetCudaContext()}) { { // simple case Ragged<T> src("[ [1 2 3 -1] [3 4 -1] [] [5 6 7 -1] ]"); src = src.To(c); Array1<T> dst(c, src.NumElements()); SegmentedExclusiveSum(src, &dst); std::vector<T> expected = {0, 1, 3, 6, // 0, 3, 7, // 0, 5, 11, 18}; CheckArrayData(dst, expected); // &src.values == dst SegmentedExclusiveSum(src, &src.values); CheckArrayData(src.values, expected); } { // random case, we assume the implementation for cpu is correct and only // test for Cuda version if (c->GetDeviceType() == kCuda) { for (int32_t i = 0; i != 2; ++i) { Ragged<T> cpu_ragged = RandomRagged<T>(-1000, 1000, 2, 4, 0, 5000); int32_t dim = cpu_ragged.NumElements(); Array1<T> cpu_dst(GetCpuContext(), dim); SegmentedExclusiveSum(cpu_ragged, &cpu_dst); Ragged<T> ragged = cpu_ragged.To(c); Array1<T> dst(c, dim); SegmentedExclusiveSum(ragged, &dst); CheckArrayData(dst, cpu_dst, 0.1); } } } } } TEST(RaggedOpsTest, SegmentedExclusiveSum) { TestSegmentedExclusiveSum<int32_t>(); TestSegmentedExclusiveSum<float>(); TestSegmentedExclusiveSum<double>(); } TEST(RaggedOpsTest, TestComputeHash) { for (int32_t i = 0; i < 20; i++) { Ragged<int32_t> src = RandomRagged<int32_t>( std::numeric_limits<int32_t>::min(), std::numeric_limits<int32_t>::max(), 2, 4, 0, 20000), src_gpu = src.To(GetCudaContext()); { Array1<int64_t> hash1 = ComputeHash<int64_t>(src), hash2 = ComputeHash<int64_t>(src_gpu).To(GetCpuContext()); EXPECT_EQ(Equal(hash1, hash2), 
true); } { Array1<int32_t> hash1 = ComputeHash<int32_t>(src), hash2 = ComputeHash<int32_t>(src_gpu).To(GetCpuContext()); EXPECT_EQ(Equal(hash1, hash2), true); } } } TEST(RaggedOpsTest, TestUniqueSequences) { for (int32_t i = 0; i < 20; i++) { for (auto &c : {GetCpuContext(), GetCudaContext()}) { Ragged<int32_t> src = RandomRagged<int32_t>(0, 3, 2, 4, 0, 20000).To(c); Ragged<int32_t> unique = UniqueSequences(src); if (src.NumAxes() == 2) { src = Unsqueeze(src, 0); unique = Unsqueeze(unique, 0); } ContextPtr cpu = GetCpuContext(); Array1<int32_t> hash_src = ComputeHash<int32_t>(src).To(cpu), hash_unique = ComputeHash<int32_t>(unique).To(cpu); RaggedShape src_hash_shape = RemoveAxis(src.shape, src.NumAxes() - 1).To(cpu); src_hash_shape = GetLayer(src_hash_shape, src_hash_shape.NumLayers() - 1); RaggedShape unique_hash_shape = RemoveAxis(unique.shape, unique.NumAxes() - 1).To(cpu); unique_hash_shape = GetLayer(unique_hash_shape, unique_hash_shape.NumLayers() - 1); K2_CHECK_EQ(src_hash_shape.Dim0(), unique_hash_shape.Dim0()); const int32_t *src_hash_row_splits = src_hash_shape.RowSplits(1).Data(), *unique_hash_row_splits = unique_hash_shape.RowSplits(1).Data(); const int32_t *src_hash_data = hash_src.Data(), *unique_hash_data = hash_unique.Data(); for (int32_t r = 0; r < src_hash_shape.Dim0(); r++) { int32_t src_begin = src_hash_row_splits[r], src_end = src_hash_row_splits[r + 1], unique_begin = unique_hash_row_splits[r], unique_end = unique_hash_row_splits[r + 1]; std::set<int32_t> src_set(src_hash_data + src_begin, src_hash_data + src_end), unique_set(unique_hash_data + unique_begin, unique_hash_data + unique_end); EXPECT_EQ((src_set == unique_set), true); } } } } TEST(RaggedIntTest, TestCreateRagged2Int) { std::vector<std::vector<int32_t>> vecs{{7, 9}, {10, 12, 13}, {}}; std::vector<int32_t> expected_values{7, 9, 10, 12, 13}; std::vector<int32_t> expected_row_splits = {0, 2, 5, 5}; Ragged<int32_t> r = CreateRagged2(vecs); EXPECT_EQ(r.Context()->GetDeviceType(), kCpu); CheckArrayData(r.RowSplits(1), expected_row_splits); EXPECT_EQ(r.NumAxes(), 2); CheckArrayData(r.values, expected_values); Ragged<int32_t> r2("[ [7 9] [10 12 13] [] ]"); K2_CHECK(Equal(r, r2)); } TEST(RaggedFloatTest, TestCreateRagged2Float) { std::vector<std::vector<float>> vecs{{1.2, 2.3}, {}, {3.4, 5.6}}; std::vector<float> expected_values{1.2, 2.3, 3.4, 5.6}; std::vector<int32_t> expected_row_splits = {0, 2, 2, 4}; Ragged<float> r = CreateRagged2(vecs); EXPECT_EQ(r.Context()->GetDeviceType(), kCpu); CheckArrayData(r.RowSplits(1), expected_row_splits); EXPECT_EQ(r.NumAxes(), 2); CheckArrayData(r.values, expected_values); } } // namespace k2
c73a919f032abef623589d2d991a4ef80cdc1372.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "graph_ray_projection.hpp"
#include <cfloat>   // FLT_MAX, used to seed the reductions below

// This flag activates timing of the code
#define DEBUG_TIME 0

#define MAXTHREADS 1024
#define EPSILON 0.000001

// CUDA error-checking function.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
    if (code != hipSuccess)
    {
        mexPrintf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
        if (abort){
            hipDeviceReset();
            mexErrMsgIdAndTxt("MEX:graph_ray_projections", ".");
        }
    }
}

__device__ __inline__ float maxf_cuda(float a,float b){
    return (a>b)?a:b;
}
__device__ __inline__ float minf_cuda(float a,float b){
    return (a<b)?a:b;
}

__device__ void warpMaxReduce(volatile float *sdata,unsigned int tid) {
    sdata[tid] = maxf_cuda(sdata[tid + 32],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 16],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 8],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 4],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 2],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 1],sdata[tid]);
}

__device__ void warpMinReduce(volatile float *sdata,unsigned int tid) {
    sdata[tid] = minf_cuda(sdata[tid + 32],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 16],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 8],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 4],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 2],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 1],sdata[tid]);
}

__global__ void maxReduceOffset(float *g_idata, float *g_odata, unsigned long n,unsigned int offset){
    extern __shared__ volatile float sdata[];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + tid;
    unsigned int gridSize = blockDim.x*gridDim.x;
    // Seed with the lowest float so meshes that lie entirely in negative
    // coordinates still reduce to the true maximum.
    float myMax = -FLT_MAX;

    while (i < n) {
        myMax = maxf_cuda(myMax,g_idata[i*3+offset]);
        i += gridSize;
    }
    sdata[tid] = myMax;
    __syncthreads();

    if (tid < 512)
        sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 512]);
    __syncthreads();
    if (tid < 256)
        sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 256]);
    __syncthreads();
    if (tid < 128)
        sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 128]);
    __syncthreads();
    if (tid < 64)
        sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 64]);
    __syncthreads();
    if (tid < 32){
        warpMaxReduce(sdata,tid);
        myMax = sdata[0];
    }
    if (tid == 0)
        g_odata[blockIdx.x] = myMax;
}

__global__ void minReduceOffset(float *g_idata, float *g_odata, unsigned long n,unsigned int offset){
    extern __shared__ volatile float sdata[];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + tid;
    unsigned int gridSize = blockDim.x*gridDim.x;
    // Seed with the largest float so meshes that lie entirely in positive
    // coordinates still reduce to the true minimum.
    float myMin = FLT_MAX;

    while (i < n) {
        myMin = minf_cuda(myMin,g_idata[i*3+offset]);
        i += gridSize;
    }
    sdata[tid] = myMin;
    __syncthreads();

    if (tid < 512)
        sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 512]);
    __syncthreads();
    if (tid < 256)
        sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 256]);
    __syncthreads();
    if (tid < 128)
        sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 128]);
    __syncthreads();
    if (tid < 64)
        sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 64]);
    __syncthreads();
    if (tid < 32){
        warpMinReduce(sdata,tid);
        myMin = sdata[0];
    }
    if (tid == 0)
        g_odata[blockIdx.x] = myMin;
}
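/* [Added explanatory note; a sketch of the intent, not original code]
   How the two reduction kernels above work: nodes are stored interleaved as
   x0,y0,z0,x1,y1,z1,..., so g_idata[i*3+offset] with offset 0/1/2 selects the
   x/y/z coordinate of node i. Each thread first folds every gridSize-th node
   into a private running max/min (grid-stride loop); the 1024 per-thread
   values are then halved in shared memory (512, 256, 128, 64) and the last 64
   are folded warp-synchronously by warpMax/MinReduce. One partial result per
   block lands in g_odata, and the caller (reduceNodes, at the bottom of this
   file) appears to finish the reduction on the host. Note the unconditional
   tid<512 ... tid<64 steps assume the kernels are launched with exactly
   MAXTHREADS (1024) threads and MAXTHREADS*sizeof(float) of dynamic shared
   memory, which is how reduceNodes launches them. */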
/**************************************************************************
 *********************** cross product in CUDA ****************************
 *************************************************************************/
__device__ __inline__ vec3d cross(const vec3d a,const vec3d b)
{
    vec3d c;
    c.x= a.y*b.z - a.z*b.y;
    c.y= a.z*b.x - a.x*b.z;
    c.z= a.x*b.y - a.y*b.x;
    return c;
}
/**************************************************************************
 *********************** Dot product in CUDA ******************************
 *************************************************************************/
__device__ __inline__ double dot(const vec3d a, const vec3d b)
{
    return a.x*b.x+a.y*b.y+a.z*b.z;
}
/**************************************************************************
 *********************** maximum value in a 4-element array of floats *****
 *************************************************************************/
__device__ __inline__ float max4(float *t,int* indM){
    float max=0;
    *indM=-1;
    for(int i=0;i<4;i++){
        if (t[i]>max){
            max=t[i];
            *indM=i;
        }
    }
    return max;
}
/**************************************************************************
 ********* minimum nonzero value in a 4-element array of floats ***********
 *************************************************************************/
__device__ __inline__ float min4nz(float *t){
    float min=1;
    for(int i=0;i<4;i++)
        min=(t[i]<min && t[i]!=0)?t[i]:min;
    return min;
}
/**************************************************************************
 ********* number of non-zeros in a 4-element float array *****************
 *************************************************************************/
__device__ __inline__ int nnz(float *t){
    int nz=0;
    for(int i=0;i<4;i++){
        if(t[i]>0){
            nz++;
        }
    }
    return nz;
}
/**************************************************************************
 *********************** Moller-Trumbore **********************************
 *************************************************************************/
__device__ __inline__ float moller_trumbore(const float3 ray1, const float3 ray2,
        const vec3d trip1,const vec3d trip2,const vec3d trip3, const float safetyEpsilon){

    vec3d direction,e1,e2;

    direction.x=ray2.x-ray1.x;
    direction.y=ray2.y-ray1.y;
    direction.z=ray2.z-ray1.z;

    e1.x =trip2.x-trip1.x;
    e1.y =trip2.y-trip1.y;
    e1.z =trip2.z-trip1.z;

    e2.x =trip3.x-trip1.x;
    e2.y =trip3.y-trip1.y;
    e2.z =trip3.z-trip1.z;

    vec3d q=cross(direction,e2);
    double a=dot(e1,q);

    if ((a>-EPSILON) && (a<EPSILON)){
        // the vector is parallel to the plane (the intersection is at infinity)
        return 0.0f;
    }

    double f=1/a;
    vec3d s;
    s.x=ray1.x-trip1.x;
    s.y=ray1.y-trip1.y;
    s.z=ray1.z-trip1.z;

    double u=f*dot(s,q);

    if (u<0.0-safetyEpsilon){
        // the intersection is outside of the triangle
        return 0.0f;
    }

    vec3d r=cross(s,e1);
    double v= f*dot(direction,r);
    if (v<0.0-safetyEpsilon || (u+v)>1.0+safetyEpsilon){
        // the intersection is outside of the triangle
        return 0.0;
    }
    return f*dot(e2,r);
}
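/* [Added worked example, not part of the original code]
   moller_trumbore() returns the parameter t along (ray2-ray1) at which the
   ray hits the triangle, or 0 when there is no hit. For instance, with
   ray1=(0,0,0), ray2=(0,0,2) and triangle (0,0,1),(1,0,1),(0,1,1):
   direction=(0,0,2), e1=(1,0,0), e2=(0,1,0), q=cross(direction,e2)=(-2,0,0),
   a=dot(e1,q)=-2, f=-0.5, s=(0,0,-1), u=f*dot(s,q)=0, r=cross(s,e1)=(0,-1,0),
   v=f*dot(direction,r)=0, and the return value is f*dot(e2,r)=0.5, i.e. the
   hit point ray1+0.5*direction=(0,0,1). safetyEpsilon widens the u/v
   acceptance range so rays grazing an edge or vertex still count as hits.
   Caveat: t==0 doubles as "no intersection", so a hit exactly at ray1 cannot
   be represented. */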
/**************************************************************************
 ***************************Tetra-line intersection************************
 *************************************************************************/
__device__ __inline__ bool tetraLineIntersect(const unsigned long *elements,const float *vertices,
        const float3 ray1, const float3 ray2,
        const unsigned long elementId,float *t,bool computelenght,const float safetyEpsilon){

    unsigned long auxNodeId[4];
    auxNodeId[0]=elements[elementId*4+0];
    auxNodeId[1]=elements[elementId*4+1];
    auxNodeId[2]=elements[elementId*4+2];
    auxNodeId[3]=elements[elementId*4+3];

    vec3d triN1,triN2,triN3;
    float l1,l2,l3,l4;
    ///////////////////////////////////////////////////////////////////////
    // As modular arithmetic is bad on GPUs (flop-wise), I manually unroll the loop
    //for (int i=0;i<4;i++)
    ///////////////////////////////////////////////////////////////////////
    // Triangle (face with nodes 0-1-2)
    triN1.x=vertices[auxNodeId[0]*3+0];
    triN1.y=vertices[auxNodeId[0]*3+1];
    triN1.z=vertices[auxNodeId[0]*3+2];

    triN2.x=vertices[auxNodeId[1]*3+0];
    triN2.y=vertices[auxNodeId[1]*3+1];
    triN2.z=vertices[auxNodeId[1]*3+2];

    triN3.x=vertices[auxNodeId[2]*3+0];
    triN3.y=vertices[auxNodeId[2]*3+1];
    triN3.z=vertices[auxNodeId[2]*3+2];

    //compute
    l1=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon);

    // Triangle (face with nodes 0-1-3)
    triN1.x=vertices[auxNodeId[0]*3+0];
    triN1.y=vertices[auxNodeId[0]*3+1];
    triN1.z=vertices[auxNodeId[0]*3+2];

    triN2.x=vertices[auxNodeId[1]*3+0];
    triN2.y=vertices[auxNodeId[1]*3+1];
    triN2.z=vertices[auxNodeId[1]*3+2];

    triN3.x=vertices[auxNodeId[3]*3+0];
    triN3.y=vertices[auxNodeId[3]*3+1];
    triN3.z=vertices[auxNodeId[3]*3+2];

    //compute
    l2=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon);

    // Triangle (face with nodes 0-2-3)
    triN1.x=vertices[auxNodeId[0]*3+0];
    triN1.y=vertices[auxNodeId[0]*3+1];
    triN1.z=vertices[auxNodeId[0]*3+2];

    triN2.x=vertices[auxNodeId[2]*3+0];
    triN2.y=vertices[auxNodeId[2]*3+1];
    triN2.z=vertices[auxNodeId[2]*3+2];

    triN3.x=vertices[auxNodeId[3]*3+0];
    triN3.y=vertices[auxNodeId[3]*3+1];
    triN3.z=vertices[auxNodeId[3]*3+2];

    //compute
    l3=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon);

    // Triangle (face with nodes 1-2-3)
    triN1.x=vertices[auxNodeId[1]*3+0];
    triN1.y=vertices[auxNodeId[1]*3+1];
    triN1.z=vertices[auxNodeId[1]*3+2];

    triN2.x=vertices[auxNodeId[2]*3+0];
    triN2.y=vertices[auxNodeId[2]*3+1];
    triN2.z=vertices[auxNodeId[2]*3+2];

    triN3.x=vertices[auxNodeId[3]*3+0];
    triN3.y=vertices[auxNodeId[3]*3+1];
    triN3.z=vertices[auxNodeId[3]*3+2];

    //compute
    l4=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon);

    //dump
    if ((l1==0.0)&&(l2==0.0)&&(l3==0.0)&&(l4==0.0)){
        t[0]=0.0;t[1]=0.0;t[2]=0.0;t[3]=0.0;
        return false;
    }else{
        t[0]=l1;t[1]=l2;t[2]=l3;t[3]=l4; // find which one is the intersection
        return true;
    }
}
/**************************************************************************
 ***************************Intersection between line-box******************
 *************************************************************************/
__device__ bool rayBoxIntersect(const float3 ray1, const float3 ray2,const float3 nodemin, const float3 nodemax){
    float3 direction;
    direction.x=ray2.x-ray1.x;
    direction.y=ray2.y-ray1.y;
    direction.z=ray2.z-ray1.z;

    float tmin,tymin,tzmin;
    float tmax,tymax,tzmax;

    if (direction.x >= 0){
        tmin = (nodemin.x - ray1.x) / direction.x;
        tmax = (nodemax.x - ray1.x) / direction.x;
    }else{
        tmin = (nodemax.x - ray1.x) / direction.x;
        tmax = (nodemin.x - ray1.x) / direction.x;
    }
    if (direction.y >= 0){
        tymin = (nodemin.y - ray1.y) / direction.y;
        tymax = (nodemax.y - ray1.y) / direction.y;
    }else{
        tymin = (nodemax.y - ray1.y) / direction.y;
        tymax = (nodemin.y - ray1.y) / direction.y;
    }
    if ( (tmin > tymax) || (tymin > tmax) ){
        return false;
    }
    if (tymin > tmin){
        tmin = tymin;
    }
    if (tymax < tmax){
        tmax = tymax;
    }
    if (direction.z >= 0){
        tzmin = (nodemin.z - ray1.z) / direction.z;
        tzmax = (nodemax.z - ray1.z) / direction.z;
    }else{
        tzmin = (nodemax.z - ray1.z) / direction.z;
        tzmax = (nodemin.z - ray1.z) / direction.z;
    }
    if ((tmin > tzmax) || (tzmin > tmax)){
        return false;
    }
    // If we wanted the ts as output
    ////
    //    if (tzmin > tmin){
    //        tmin = tzmin;
    //    }
    //
    //    if (tzmax < tmax){
    //        tmax = tzmax;
    //    }
    ////
    return true;
}
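/* [Added explanatory note, not part of the original code]
   rayBoxIntersect() above is the classic slab test: for each axis it computes
   the parameter interval in which the ray lies between the two box planes and
   intersects those intervals axis by axis; if the running interval becomes
   empty (tmin > tmax) the ray misses the box. Since the t values themselves
   are discarded (see the commented-out block), boxes "behind" ray1 also test
   positive; for culling R-tree nodes this is conservative and therefore safe. */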
/**************************************************************************
 ******Function to detect the first triangle to expand the graph***********
 *************************************************************************/
template <int tree_depth>
__global__ void initXrays(const unsigned long* elements, const float* vertices,
        const unsigned long *boundary,const unsigned long nboundary,
        float * d_res, Geometry geo,
        const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,
        const int* bin_n_elements,const long* bin_elements,const double* bin_box,
        const int M,const int m,const double* MBR,const bool* isleaf,
        const long root,const long length_tree)
{
    // Depth first R-tree search
    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx = x * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    long crossingID=-1;

    // int* path=(int*)malloc(tree_depth*sizeof(int));
    // int* n_checked=(int*)malloc(tree_depth*sizeof(int));
    int path[tree_depth];
    int n_checked[tree_depth];
#pragma unroll
    for (int i=0;i<tree_depth;i++)
        n_checked[i]=0;

    float safetyEpsilon=0.0000001f;
    float t[4];
    float taux, tmin=1;
    int depth=0;

    // Compute detector position
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);

    float3 nodemin,nodemax;
    nodemax.x=(float)bin_box[root*6+0];
    nodemax.y=(float)bin_box[root*6+1];
    nodemax.z=(float)bin_box[root*6+2];
    nodemin.x=(float)bin_box[root*6+3];
    nodemin.y=(float)bin_box[root*6+4];
    nodemin.z=(float)bin_box[root*6+5];

    bool isinbox=rayBoxIntersect(source, det, nodemin,nodemax);
    if (!isinbox){
        d_res[idx]=-1.0f;
        return;
    }

    bool finished=false;
    int next_node;
    // We know it intersects; let's start from the first one.
    depth=0;
    path[0]=root;
    n_checked[0]=0;
    // path[depth]=(int)bin_elements[root*(M+1)+0];
    crossingID=-1;
    int iter=0;
    while (!finished){
        iter++;
        // If the next one to check in the current node is the last one, then we
        // have checked everything: go up one node. Test depth>=0 first so that
        // n_checked[-1] is never read when the stack empties.
        while(depth>=0 && (long)n_checked[depth]>=(long)bin_n_elements[path[depth]]){
            depth--;
        }
        if (depth<0){
            finished=true;
            d_res[idx]=(float)crossingID;
            return;
        }
        next_node=bin_elements[path[depth]*(M+1)+n_checked[depth]];
        //get bounding box
        nodemax.x=bin_box[next_node*6+0];
        nodemax.y=bin_box[next_node*6+1];
        nodemax.z=bin_box[next_node*6+2];
        nodemin.x=bin_box[next_node*6+3];
        nodemin.y=bin_box[next_node*6+4];
        nodemin.z=bin_box[next_node*6+5];
        isinbox=rayBoxIntersect(source, det, nodemin,nodemax);
        // count that we checked it already
        n_checked[depth]++;

        if (isinbox){
            if(!isleaf[next_node]){
                // if it's not a leaf, then we just go deeper
                depth++;
                n_checked[depth]=0; // make sure prior values do not interfere now; if we go deeper, it is the first time on this node.
                path[depth]=next_node;
            }else{
                // if it's a leaf, we should check the triangles
                for(unsigned int i=0;i<bin_n_elements[next_node];i++){
                    // check all triangles, obtain smallest t
                    tetraLineIntersect(elements,vertices,source,det,boundary[bin_elements[next_node*(M+1)+i]],t,true,safetyEpsilon);
                    // if there is an intersection
                    if ((t[0]+t[1]+t[2]+t[3])!=0){
                        taux=min4nz(t);
                        if (taux<tmin){
                            tmin=taux;
                            crossingID=bin_elements[next_node*(M+1)+i];
                        }
                    }
                }//endfor
            }
        }//end isinbox
        // If it's not inside, then we just loop again and check the next one.
    }
}
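/* [Added explanatory note, not part of the original kernel]
   initXrays() walks the R-tree iteratively, since recursion is undesirable on
   the GPU: path[] is an explicit stack of node ids from the root down to the
   current node, and n_checked[] records how many children of each stacked
   node have been visited. tree_depth is a compile-time template parameter
   purely so these two arrays have a static size; the switch in
   graphForwardRay() below picks the smallest instantiation that is at least
   as deep as the actual tree. */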
template __global__ void initXrays<2>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<4>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<6>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<8>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<10>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<12>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
template __global__ void initXrays<14>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree);
/**************************************************************************
 ******Function to detect the first triangle to expand the graph***********
 *************************************************************************/
///////////////////////////////////////////////////////////////////////////
/////////////////////////        NOT USED         //////////////////////////
///////////////////////////////////////////////////////////////////////////
__global__ void initXraysBrute(const unsigned long* elements, const float* vertices,
        const unsigned long *boundary,const unsigned long nboundary,
        float * d_res, Geometry geo,
        const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,
        const float3 nodemin,const float3 nodemax)
{
    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx = x * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    // Compute detector position
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);

    // Should we even try? If the ray does not cross the boundary, don't try.
    bool crossBound=rayBoxIntersect(source, det, nodemin,nodemax);
    if (!crossBound){
        d_res[idx]=-1.0f;
        return;
    }
    // Check intersection with all elements in the boundary
    unsigned long notintersect=nboundary;
    float t[4];
    float t1,tinter=10000.0f;
    float safetyEpsilon=0.0000001f;
    unsigned long crossingID=0;
    // Check with all elements, and keep the one that gives the lowest parameter
    while(notintersect==nboundary){
        notintersect=0;
        for(unsigned long i=0 ;i<nboundary;i++){
            tetraLineIntersect(elements,vertices,source,det,boundary[i],t,true,safetyEpsilon);
            if (nnz(t)==0){
                notintersect++;
            }else{
                t1=min4nz(t);
                if (t1<tinter){
                    tinter=t1;
                    crossingID=i;
                }
            }
        }
        safetyEpsilon=safetyEpsilon*10;
    }
    d_res[idx]=(float)crossingID;
}
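/* [Added explanatory note, not part of the original code]
   graphProject() below integrates attenuation along each source-detector ray
   by walking the mesh element to element: starting from the boundary element
   found by initXrays(), it intersects the ray with the four faces of the
   current tetrahedron, accumulates d_image[element] times the chord length
   between the entry (t1) and exit (t2) parameters, and hops to the neighbour
   across the exit face until it leaves the mesh (neighbour == -1). */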
/**************************************************************************
 ******************The main projection function ***************************
 *************************************************************************/
__global__ void graphProject(const unsigned long *elements, const float *vertices,
        const unsigned long *boundary,const long *neighbours,
        const float * d_image, float * d_res,
        Geometry geo, float3 source, float3 deltaU, float3 deltaV, float3 uvOrigin){

    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx = x * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    // Read initial position. Generate auxiliary variables for element tracking
    long current_element=(long)d_res[idx];
    long previous_element;
    long aux_element;

    // Get the coordinates of the detector for this kernel
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);

    // If the current element is "none", then we are done; we are not intersecting the mesh
    if (current_element<0){
        //no need to do stuff
        d_res[idx]=0.0f;
        return;
    }

    // initialize variables for the lengths and result
    float result=0.0f;
    float length,t1,t2;
    float t[4];
    int indM;
    bool isIntersect;

    // Let's compute the first intersection outside the main loop.
    // The structure of this loop has to be identical to the one in initXrays() or
    // there is risk of not getting the same floating point value bit by bit.
    float safeEpsilon=0.00001f;
    isIntersect=tetraLineIntersect(elements,vertices,source,det,boundary[current_element],t,true,0.0f);
    while(!isIntersect){
        isIntersect=tetraLineIntersect(elements,vertices,source,det,boundary[current_element],t,true,safeEpsilon);
        if (nnz(t)<=1){
            isIntersect=false;
            safeEpsilon*=10;
        }
    }
    // Reset the safety variable
    safeEpsilon=0.00001f;

    // Find the maximum and minimum non-zero intersection parameters
    t2=max4(t,&indM);
    t1=min4nz(t);

    // Let's get the ray (direction) and the current intersection length.
    float3 direction,p1,p2;
    direction.x=det.x-source.x;
    direction.y=det.y-source.y;
    direction.z=det.z-source.z;
    p2.x=direction.x* (t2);
    p2.y=direction.y* (t2);
    p2.z=direction.z* (t2);
    p1.x=direction.x* (t1);
    p1.y=direction.y* (t1);
    p1.z=direction.z* (t1);
    length=sqrt((p2.x-p1.x)*(p2.x-p1.x)+(p2.y-p1.y)*(p2.y-p1.y)+(p2.z-p1.z)*(p2.z-p1.z));

    // Start accumulating the result
    result=d_image[boundary[current_element]]*length;

    // If t1 and t2 are the same, we need to make sure that the one we choose as
    // t2 (the one that will lead us to the next element) is the correct one.
    // Otherwise we will go out of the image, and the code will end.
    // This piece of code makes sure that is checked and swaps them otherwise.
    if(t1==t2){
        aux_element=neighbours[boundary[current_element]*4+indM];
        if(aux_element==-1){
            int auxind;
            for(int i=0;i<4;i++){
                if(indM!=i && t[i]==t1){
                    auxind=i;
                }
            }
            indM=auxind;
        }
    }
    // Grab the index of the next elements and save the current one for further checking
    previous_element=boundary[current_element];
    current_element=neighbours[boundary[current_element]*4+indM];
    // if it's "none" then that's it, we are done.
    if (current_element==-1){
        d_res[idx]=result;
        return;
    }

    float sumt;
    unsigned long c=0;
    bool noNeighbours=false;
    while(!noNeighbours && c<5000){ // RANDOM safe distance, change to something sensible
        // c is a counter to avoid infinite loops
        c++;
        // Check intersections; we know this one is intersected (because it shares a face with the previous one that was intersected)
        isIntersect=tetraLineIntersect(elements,vertices,source,det,(unsigned int)current_element,t,true,0.0f);
        while(!isIntersect){
            // If intersection failed, then let's slightly increase the size of the triangle
            // (not really, we increase the bounds of acceptable intersection values).
            // We can do it without safety because we already know it must happen.
            isIntersect=tetraLineIntersect(elements,vertices,source,det,(unsigned int)current_element,t,true,safeEpsilon);
            if (nnz(t)<=1){
                isIntersect=false;
                safeEpsilon*=10;
            }
        }
        safeEpsilon=0.00001f;

        // Find the maximum and minimum non-zero intersection parameters
        t2=max4(t,&indM);
        t1=min4nz(t);
        // If they are very similar, just treat them as if they were the same.
        // This was necessary in a previous version; it's left here just in case it's needed again.
        //////
        //        if (fabsf(t2-t1)<0.00000001){
        //            t2=t1;
        //            t[indM]=t1;
        //        }
        //////
        // Are they all zero?
        sumt=t[0]+t[1]+t[2]+t[3];
        if (sumt!=0.0){
            // compute intersection length and update result integral
            p2.x=direction.x* (t2);
            p2.y=direction.y* (t2);
            p2.z=direction.z* (t2);
            p1.x=direction.x* (t1);
            p1.y=direction.y* (t1);
            p1.z=direction.z* (t1);
            length=sqrt((p2.x-p1.x)*(p2.x-p1.x)+(p2.y-p1.y)*(p2.y-p1.y)+(p2.z-p1.z)*(p2.z-p1.z));
            result+=d_image[current_element]*length;

            // Now let's make sure we can find the next element correctly.
            // If t1 and t2 are the same, we need to make sure that the one we choose as
            // t2 (the one that will lead us to the next element) is the correct one.
            // Otherwise we will go backwards and get trapped in an infinite loop.
            // This piece of code makes sure this does not happen.
            if(t1==t2){
                aux_element=neighbours[current_element*4+indM];
                if(aux_element==previous_element){
                    int auxind;
                    for(int i=0;i<4;i++){
                        if(indM!=i && t[i]==t1){
                            auxind=i;
                        }
                    }
                    indM=auxind;
                }
            }
            // Update the elements
            previous_element=current_element;
            current_element=neighbours[current_element*4+indM];
            // if we are out then that's it, we are done.
            if (current_element==-1){
                d_res[idx]=result;
                return;
            }
            continue;
        }
        // If there was no intersection, then we are out. Can this even happen?
        noNeighbours=true;
    }//endwhile
    // It should never get here, ever.
    d_res[idx]=-1.0;
    return;
}
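/* [Added explanatory note, not part of the original code]
   graphForwardRay() below is the MEX-facing host driver: it replicates the
   mesh, the attenuation image and the R-tree on every visible GPU, then deals
   the projection angles out round-robin (angle i+dev goes to device dev), so
   the devices compute different projections of the same mesh concurrently. */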
\n graph_ray_projection.cu line 526."); break; } } devicenames=deviceProp.name; } hipSetDevice(0); hipGetDeviceProperties(&deviceProp, 0); unsigned long long mem_GPU_global=(unsigned long long)(deviceProp.totalGlobalMem*0.9); // This is the mandatory mem that we need to broadcast to all GPUs size_t num_bytes_img = nelements*sizeof(float); size_t num_bytes_nodes = nnodes*3*sizeof(float); size_t num_bytes_elements = nelements*4*sizeof(unsigned long); size_t num_bytes_neighbours = nneighbours*4*sizeof(long); size_t num_bytes_boundary = nboundary*sizeof(unsigned long); // R-tree size_t num_bytes_bin_n_elements = length_tree*sizeof(int); size_t num_bytes_bin_elements = length_tree*(M+1)*sizeof(long); size_t num_bytes_bin_box = 6*length_tree*sizeof(double); size_t num_bytes_MBR = 6*nboundary*sizeof(double); size_t num_bytes_isleaf=length_tree*sizeof(bool); unsigned long long mem_needed_graph=num_bytes_img+num_bytes_nodes+num_bytes_elements+num_bytes_neighbours+num_bytes_boundary; unsigned long long mem_free_GPU=mem_GPU_global-mem_needed_graph; // mexPrintf(" num_bytes_img %llu \n", num_bytes_img ); // mexPrintf("num_bytes_nodes %llu \n", num_bytes_nodes ); // mexPrintf("num_bytes_elements %llu \n", num_bytes_elements); // mexPrintf("num_bytes_neighbours %llu \n", num_bytes_neighbours ); // mexPrintf("num_bytes_boundary %llu \n", num_bytes_boundary); // mexPrintf("num_bytes_needed %llu \n", mem_needed_graph); // mexPrintf("num_bytes_GPU %llu \n", mem_GPU_global ); size_t num_bytes_proj = geo.nDetecU*geo.nDetecV * sizeof(float); if (mem_needed_graph>mem_GPU_global) mexErrMsgIdAndTxt("TriangleCT:graphForward:Memory","The entire mesh does not fit on the GPU \n"); if (num_bytes_proj>mem_free_GPU) mexErrMsgIdAndTxt("TriangleCT:graphForward:Memory","The entire mesh + attenuation values + 2 projection do not fit on a GPU.\n Dividig the projections is not supported \n"); float time; float timecopy=0, timekernel=0,timeaux; hipEvent_t start, stop; if (DEBUG_TIME){ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } //result float ** d_res= (float **)malloc(deviceCount*sizeof(float*)); // FE structured graph float** d_image=(float **)malloc(deviceCount*sizeof(float*)); float** d_nodes=(float **)malloc(deviceCount*sizeof(float*)); unsigned long** d_elements=(unsigned long **)malloc(deviceCount*sizeof(unsigned long*)); long ** d_neighbours=( long **)malloc(deviceCount*sizeof(long*)); unsigned long** d_boundary=(unsigned long **)malloc(deviceCount*sizeof(unsigned long*)); // R-tree vars int** d_bin_n_elements=(int **)malloc(deviceCount*sizeof(int*)); long** d_bin_elements= (long**)malloc(deviceCount*sizeof(long*)); double** d_bin_box=(double**)malloc(deviceCount*sizeof(double*)); double** d_MBR=(double**)malloc(deviceCount*sizeof(double*)); bool** d_isleaf=(bool**)malloc(deviceCount*sizeof(bool*)); //start allocation for (dev = 0; dev < deviceCount; dev++) { hipSetDevice(dev); // First send all the relevant data to CUDA, and allocate enough memory for the result gpuErrchk(hipMalloc((void **)&d_res[dev],num_bytes_proj)); gpuErrchk(hipMalloc((void **)&d_image[dev],num_bytes_img)); gpuErrchk(hipMemcpyAsync(d_image[dev],image,num_bytes_img,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_nodes[dev],num_bytes_nodes)); gpuErrchk(hipMemcpyAsync(d_nodes[dev],nodes,num_bytes_nodes,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_elements[dev],num_bytes_elements)); gpuErrchk(hipMemcpyAsync(d_elements[dev],elements,num_bytes_elements,hipMemcpyHostToDevice)); 
gpuErrchk(hipMalloc((void **)&d_neighbours[dev],num_bytes_neighbours)); gpuErrchk(hipMemcpyAsync(d_neighbours[dev],neighbours,num_bytes_neighbours,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_boundary[dev],num_bytes_boundary)); gpuErrchk(hipMemcpyAsync(d_boundary[dev],boundary,num_bytes_boundary,hipMemcpyHostToDevice)); // Now all the R-tree stuff gpuErrchk(hipMalloc((void **)&d_bin_n_elements[dev],num_bytes_bin_n_elements)); gpuErrchk(hipMemcpyAsync(d_bin_n_elements[dev],bin_n_elements,num_bytes_bin_n_elements,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_bin_elements[dev],num_bytes_bin_elements)); gpuErrchk(hipMemcpyAsync(d_bin_elements[dev],bin_elements,num_bytes_bin_elements,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_bin_box[dev],num_bytes_bin_box)); gpuErrchk(hipMemcpyAsync(d_bin_box[dev],bin_box,num_bytes_bin_box,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_MBR[dev],num_bytes_MBR)); gpuErrchk(hipMemcpyAsync(d_MBR[dev],MBR,num_bytes_MBR,hipMemcpyHostToDevice)); gpuErrchk(hipMalloc((void **)&d_isleaf[dev],num_bytes_isleaf)); gpuErrchk(hipMemcpyAsync(d_isleaf[dev],isleaf,num_bytes_isleaf,hipMemcpyHostToDevice)); } if (DEBUG_TIME){ hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop); mexPrintf("Time to memcpy: %3.1f ms \n", time); } gpuErrchk(hipDeviceSynchronize()); // KERNEL TIME! int divU,divV; divU=8; divV=8; dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1); dim3 block(divU,divV,1); float3 deltaU, deltaV, uvOrigin; float3 source; for (unsigned int i=0;i<nangles;i+=(unsigned int)deviceCount){ for (dev = 0; dev < deviceCount; dev++){ if (i+dev >=nangles){ //mexWarnMsgIdAndTxt("TriangleCT:graphBackward:GPUselect"," i+dev >=nangles \n"); break; } geo.alpha=angles[(i+dev)*3]; geo.theta=angles[(i+dev)*3+1]; geo.psi =angles[(i+dev)*3+2]; //dev=i%deviceCount; //dev=0; computeGeometricParams(geo, &source,&deltaU, &deltaV,&uvOrigin,i+dev); //gpuErrchk(hipDeviceSynchronize()); hipSetDevice(dev); if (DEBUG_TIME){ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); } switch ((int)((tree_depth + 2 - 1) / 2) * 2){ case 2: hipLaunchKernelGGL(( initXrays<2>), dim3(grid),dim3(block) , 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 4: hipLaunchKernelGGL(( initXrays<4>), dim3(grid),dim3(block) , 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 6: hipLaunchKernelGGL(( initXrays<6>), dim3(grid),dim3(block) , 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 8: hipLaunchKernelGGL(( initXrays<8>), dim3(grid),dim3(block) , 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 10: hipLaunchKernelGGL(( initXrays<10>), dim3(grid),dim3(block) , 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, 
                        d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree);
                break;
            case 12:
                hipLaunchKernelGGL((initXrays<12>), dim3(grid), dim3(block), 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary,
                        d_res[dev], geo, source,deltaU, deltaV,uvOrigin,
                        d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree);
                break;
            case 14:
                hipLaunchKernelGGL((initXrays<14>), dim3(grid), dim3(block), 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary,
                        d_res[dev], geo, source,deltaU, deltaV,uvOrigin,
                        d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree);
                break;
            default:
                mexErrMsgIdAndTxt("MEX:graph_ray_projections","R*-Tree is too deep (more than 14)");
                break;
        }

        if (DEBUG_TIME){
            hipEventRecord(stop, 0);
            hipEventSynchronize(stop);
            hipEventElapsedTime(&time, start, stop);
            mexPrintf("Time to Init Kernel: %3.1f ms \n", time);
        }
        if (DEBUG_TIME){
            hipEventCreate(&start);
            hipEventCreate(&stop);
            hipEventRecord(start, 0);
        }

        gpuErrchk(hipDeviceSynchronize());
        hipLaunchKernelGGL((graphProject), dim3(grid), dim3(block), 0, 0, d_elements[dev],d_nodes[dev],d_boundary[dev],d_neighbours[dev],d_image[dev],d_res[dev], geo,source,deltaU,deltaV,uvOrigin);
        gpuErrchk(hipDeviceSynchronize());

        if (DEBUG_TIME){
            hipEventRecord(stop, 0);
            hipEventSynchronize(stop);
            hipEventElapsedTime(&time, start, stop);
            mexPrintf("Time to proj Kernel: %3.1f ms \n", time);
        }
    }
    for (dev = 0; dev < deviceCount; dev++){
        //gpuErrchk(hipDeviceSynchronize());
        hipSetDevice(dev);
        gpuErrchk(hipMemcpyAsync(result[i+dev], d_res[dev], num_bytes_proj, hipMemcpyDeviceToHost));
    }
}
//    if (DEBUG_TIME){
//        mexPrintf("Time of Kernel: %3.1f ms \n", timekernel);
//        mexPrintf("Time of memcpy to Host: %3.1f ms \n", timecopy);
//    }
if (DEBUG_TIME){
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
}
// cudaGraphFree(&tempHostGraph,&tempHostElement,&tempHostNode);
for (dev = 0; dev < deviceCount; dev++) {
    hipSetDevice(dev);
    hipFree(d_res[dev]);
    hipFree(d_image[dev]);
    hipFree(d_nodes[dev]);
    hipFree(d_neighbours[dev]);
    hipFree(d_elements[dev]);
    hipFree(d_boundary[dev]);
    //R-tree stuff
    hipFree(d_bin_n_elements[dev]);
    hipFree(d_bin_elements[dev]);
    hipFree(d_bin_box[dev]);
    hipFree(d_MBR[dev]);
    hipFree(d_isleaf[dev]);
}
if (DEBUG_TIME){
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&time, start, stop);
    mexPrintf("Time to free: %3.1f ms \n", time);
}
hipDeviceReset();
return;
}

void reduceNodes(float *d_nodes, unsigned long nnodes, float* max, float* min){
    int divU;
    divU=MAXTHREADS;
    dim3 grid((nnodes+divU-1)/divU,1,1);
    dim3 block(divU,1,1);
    //auxiliary for reduction
    float* d_auxmax,*d_auxmin;
    gpuErrchk(hipMalloc((void **)&d_auxmax, sizeof(float)*(nnodes + MAXTHREADS - 1) / MAXTHREADS));
    gpuErrchk(hipMalloc((void **)&d_auxmin, sizeof(float)*(nnodes + MAXTHREADS - 1) / MAXTHREADS));
    //gpuErrchk(hipMalloc((void **)&debugreduce,MAXTHREADS*sizeof(float)));
    float** getFinalreducesmin=(float**)malloc(3*sizeof(float*));
    float** getFinalreducesmax=(float**)malloc(3*sizeof(float*));
    for (unsigned int i=0; i<3; i++){
        getFinalreducesmin[i]=(float*)malloc(MAXTHREADS*sizeof(float));
        getFinalreducesmax[i]=(float*)malloc(MAXTHREADS*sizeof(float));
    }
    // for X,Y,Z
    for (unsigned int i=0; i<3; i++){
        hipLaunchKernelGGL((maxReduceOffset), dim3(grid), dim3(block), MAXTHREADS*sizeof(float), 0, d_nodes, d_auxmax, nnodes,i);
        hipLaunchKernelGGL((minReduceOffset), dim3(grid), dim3(block), MAXTHREADS*sizeof(float), 0, d_nodes, d_auxmin, nnodes,i);
        gpuErrchk(hipPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
        if (grid.x > 1) {
            // There should be another reduce here, but the reduce code above strides over
            // every 3rd value, so it cannot be reused as-is. The most efficient alternative
            // is to do the final reduce (<1024 values) on the CPU, therefore avoiding a deep
            // copy. We could also rewrite the reduce, but it's not worth the time now (D:).
            gpuErrchk(hipMemcpy( getFinalreducesmin[i], d_auxmin,grid.x*sizeof(float), hipMemcpyDeviceToHost));
            gpuErrchk(hipMemcpy( getFinalreducesmax[i], d_auxmax,grid.x*sizeof(float), hipMemcpyDeviceToHost));
            // Seed with the first partial result, then fold in the rest.
            max[i]=getFinalreducesmax[i][0];
            min[i]=getFinalreducesmin[i][0];
            for (unsigned int j=1;j<grid.x;j++){
                max[i]=( getFinalreducesmax[i][j]>max[i])? getFinalreducesmax[i][j]:max[i];
                min[i]=( getFinalreducesmin[i][j]<min[i])? getFinalreducesmin[i][j]:min[i];
            }
        } else {
            gpuErrchk(hipMemcpy(&max[i], d_auxmax, sizeof(float), hipMemcpyDeviceToHost));
            gpuErrchk(hipMemcpy(&min[i], d_auxmin, sizeof(float), hipMemcpyDeviceToHost));
        }
        gpuErrchk(hipPeekAtLastError());
        gpuErrchk(hipDeviceSynchronize());
    }
    // Release host-side scratch buffers as well as the device ones.
    for (unsigned int i=0; i<3; i++){
        free(getFinalreducesmin[i]);
        free(getFinalreducesmax[i]);
    }
    free(getFinalreducesmin);
    free(getFinalreducesmax);
    hipFree(d_auxmax);
    hipFree(d_auxmin);
}

// TODO: quite a lot of geometric transforms.
void computeGeometricParams(const Geometry geo,float3 * source, float3* deltaU, float3* deltaV, float3* originUV,unsigned int idxAngle){

    float3 auxOriginUV;
    float3 auxDeltaU;
    float3 auxDeltaV;
    auxOriginUV.x=-(geo.DSD[idxAngle]-geo.DSO[idxAngle]);
    // top left
    auxOriginUV.y=-geo.sDetecU/2+/*half a pixel*/geo.dDetecU/2;
    auxOriginUV.z=geo.sDetecV/2-/*half a pixel*/geo.dDetecV/2;

    //Offset of the detector
    auxOriginUV.y=auxOriginUV.y+geo.offDetecU[idxAngle];
    auxOriginUV.z=auxOriginUV.z+geo.offDetecV[idxAngle];

    // Change in U
    auxDeltaU.x=auxOriginUV.x;
    auxDeltaU.y=auxOriginUV.y+geo.dDetecU;
    auxDeltaU.z=auxOriginUV.z;
    //Change in V
    auxDeltaV.x=auxOriginUV.x;
    auxDeltaV.y=auxOriginUV.y;
    auxDeltaV.z=auxOriginUV.z-geo.dDetecV;

    float3 auxSource;
    auxSource.x=geo.DSO[idxAngle];
    auxSource.y=0;
    auxSource.z=0;

    // rotate around axis.
eulerZYZ(geo,&auxOriginUV); eulerZYZ(geo,&auxDeltaU); eulerZYZ(geo,&auxDeltaV); eulerZYZ(geo,&auxSource); // Offset image (instead of offseting image, -offset everything else) auxOriginUV.x =auxOriginUV.x-geo.offOrigX[idxAngle]; auxOriginUV.y =auxOriginUV.y-geo.offOrigY[idxAngle]; auxOriginUV.z =auxOriginUV.z-geo.offOrigZ[idxAngle]; auxDeltaU.x=auxDeltaU.x-geo.offOrigX[idxAngle]; auxDeltaU.y=auxDeltaU.y-geo.offOrigY[idxAngle]; auxDeltaU.z=auxDeltaU.z-geo.offOrigZ[idxAngle]; auxDeltaV.x=auxDeltaV.x-geo.offOrigX[idxAngle]; auxDeltaV.y=auxDeltaV.y-geo.offOrigY[idxAngle]; auxDeltaV.z=auxDeltaV.z-geo.offOrigZ[idxAngle]; auxSource.x=auxSource.x-geo.offOrigX[idxAngle]; auxSource.y=auxSource.y-geo.offOrigY[idxAngle]; auxSource.z=auxSource.z-geo.offOrigZ[idxAngle]; auxDeltaU.x=auxDeltaU.x-auxOriginUV.x; auxDeltaU.y=auxDeltaU.y-auxOriginUV.y; auxDeltaU.z=auxDeltaU.z-auxOriginUV.z; auxDeltaV.x=auxDeltaV.x-auxOriginUV.x; auxDeltaV.y=auxDeltaV.y-auxOriginUV.y; auxDeltaV.z=auxDeltaV.z-auxOriginUV.z; *originUV=auxOriginUV; *deltaU=auxDeltaU; *deltaV=auxDeltaV; *source=auxSource; return; } void eulerZYZ(Geometry geo, float3* point){ float3 auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; }
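// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original sources: a host-side sanity
// check of the ZYZ Euler rotation implemented by eulerZYZ() above. The names
// Vec3, zyzRotate and checkZyz are hypothetical stand-ins so the snippet is
// self-contained; it assumes only <cmath> and <cstdio>. With theta = psi = 0
// the ZYZ matrix must reduce to a plain rotation of `alpha` about Z, which is
// exactly what this checks.
// ---------------------------------------------------------------------------
#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

static Vec3 zyzRotate(double alpha, double theta, double psi, Vec3 p) {
    Vec3 q;
    q.x = ( std::cos(alpha)*std::cos(theta)*std::cos(psi) - std::sin(alpha)*std::sin(psi))*p.x
        + (-std::cos(alpha)*std::cos(theta)*std::sin(psi) - std::sin(alpha)*std::cos(psi))*p.y
        +   std::cos(alpha)*std::sin(theta)*p.z;
    q.y = ( std::sin(alpha)*std::cos(theta)*std::cos(psi) + std::cos(alpha)*std::sin(psi))*p.x
        + (-std::sin(alpha)*std::cos(theta)*std::sin(psi) + std::cos(alpha)*std::cos(psi))*p.y
        +   std::sin(alpha)*std::sin(theta)*p.z;
    q.z = -std::sin(theta)*std::cos(psi)*p.x
        +  std::sin(theta)*std::sin(psi)*p.y
        +  std::cos(theta)*p.z;
    return q;
}

static void checkZyz() {
    const double a = 0.5;                      // alpha only: pure Z rotation
    Vec3 p = {1.0, 0.0, 0.0};
    Vec3 q = zyzRotate(a, 0.0, 0.0, p);
    // Expect (cos a, sin a, 0).
    std::printf("got (%f, %f, %f), expected (%f, %f, 0)\n",
                q.x, q.y, q.z, std::cos(a), std::sin(a));
}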
c73a919f032abef623589d2d991a4ef80cdc1372.cu
#include "graph_ray_projection.hpp"
#include <float.h>   // FLT_MAX, used to seed the min/max reductions below

// This flag activates timing of the code
#define DEBUG_TIME 0
#define MAXTHREADS 1024
#define EPSILON 0.000001

// CUDA error checking function.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        mexPrintf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort){
            cudaDeviceReset();
            mexErrMsgIdAndTxt("MEX:graph_ray_projections", ".");
        }
    }
}

__device__ __inline__ float maxf_cuda(float a,float b){
    return (a>b)?a:b;
}
__device__ __inline__ float minf_cuda(float a,float b){
    return (a<b)?a:b;
}

__device__ void warpMaxReduce(volatile float *sdata,unsigned int tid) {
    sdata[tid] = maxf_cuda(sdata[tid + 32],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 16],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 8],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 4],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 2],sdata[tid]);
    sdata[tid] = maxf_cuda(sdata[tid + 1],sdata[tid]);
}
__device__ void warpMinReduce(volatile float *sdata,unsigned int tid) {
    sdata[tid] = minf_cuda(sdata[tid + 32],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 16],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 8],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 4],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 2],sdata[tid]);
    sdata[tid] = minf_cuda(sdata[tid + 1],sdata[tid]);
}

__global__ void maxReduceOffset(float *g_idata, float *g_odata, unsigned long n,unsigned int offset){
    extern __shared__ volatile float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + tid;
    unsigned int gridSize = blockDim.x*gridDim.x;
    // Seed with the lowest possible value so that all-negative inputs reduce correctly.
    float myMax = -FLT_MAX;
    while (i < n) {
        myMax = maxf_cuda(myMax,g_idata[i*3+offset]);
        i += gridSize;
    }
    sdata[tid] = myMax;
    __syncthreads();
    if (tid < 512) sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 512]); __syncthreads();
    if (tid < 256) sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 256]); __syncthreads();
    if (tid < 128) sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 128]); __syncthreads();
    if (tid < 64)  sdata[tid] = maxf_cuda(sdata[tid],sdata[tid + 64]);  __syncthreads();
    if (tid < 32){
        warpMaxReduce(sdata,tid);
        myMax = sdata[0];
    }
    if (tid == 0) g_odata[blockIdx.x] = myMax;
}

__global__ void minReduceOffset(float *g_idata, float *g_odata, unsigned long n,unsigned int offset){
    extern __shared__ volatile float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + tid;
    unsigned int gridSize = blockDim.x*gridDim.x;
    // Seed with the highest possible value so that all-positive inputs reduce correctly.
    float myMin = FLT_MAX;
    while (i < n) {
        myMin = minf_cuda(myMin,g_idata[i*3+offset]);
        i += gridSize;
    }
    sdata[tid] = myMin;
    __syncthreads();
    if (tid < 512) sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 512]); __syncthreads();
    if (tid < 256) sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 256]); __syncthreads();
    if (tid < 128) sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 128]); __syncthreads();
    if (tid < 64)  sdata[tid] = minf_cuda(sdata[tid],sdata[tid + 64]);  __syncthreads();
    if (tid < 32){
        warpMinReduce(sdata,tid);
        myMin = sdata[0];
    }
    if (tid == 0) g_odata[blockIdx.x] = myMin;
}

/**************************************************************************
 *********************** cross product in CUDA ****************************
 *************************************************************************/
__device__ __inline__ vec3d cross(const vec3d a,const vec3d b)
{
    vec3d c;
    c.x= a.y*b.z - a.z*b.y;
    c.y= a.z*b.x - a.x*b.z;
    c.z= a.x*b.y - a.y*b.x;
    return c;
}
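/**************************************************************************
 * Illustrative helper (hypothetical, not used elsewhere in this file):
 * the unnormalized normal of a triangle (a, b, c) is the cross product of
 * two of its edges, and its length equals twice the triangle's area.
 * Moller-Trumbore below relies on exactly this identity through
 * cross(direction, e2) and cross(s, e1).
 *************************************************************************/
__device__ __inline__ vec3d triangleNormalExample(const vec3d a,const vec3d b,const vec3d c)
{
    vec3d e1,e2;
    e1.x=b.x-a.x; e1.y=b.y-a.y; e1.z=b.z-a.z;
    e2.x=c.x-a.x; e2.y=c.y-a.y; e2.z=c.z-a.z;
    return cross(e1,e2);   // |result| == 2 * area of triangle (a,b,c)
}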
/************************************************************************** *********************** Dot product in CUDA ****************************** *************************************************************************/ __device__ __inline__ double dot(const vec3d a, const vec3d b) { return a.x*b.x+a.y*b.y+a.z*b.z; } /************************************************************************** *********************** maximum value in a 4 valued array of floats******* *************************************************************************/ __device__ __inline__ float max4(float *t,int* indM){ float max=0; *indM=-1; for(int i=0;i<4;i++){ if (t[i]>max){ max=t[i]; *indM=i; } } return max; } /************************************************************************** ********* minimum nonzero value in a 4 valued array of float ************* *************************************************************************/ __device__ __inline__ float min4nz(float *t){ float min=1; for(int i=0;i<4;i++) min=(t[i]<min && t[i]!=0)?t[i]:min; return min; } /************************************************************************** ********* number of non zeroes in a 4 legth float array **** ************* *************************************************************************/ __device__ __inline__ int nnz(float *t){ int nz=0; for(int i=0;i<4;i++){ if(t[i]>0){ nz++; } } return nz; } /************************************************************************** *********************** Moller trumbore ********************************** **************************************************************************/ __device__ __inline__ float moller_trumbore(const float3 ray1, const float3 ray2, const vec3d trip1,const vec3d trip2,const vec3d trip3, const float safetyEpsilon){ vec3d direction,e1,e2; direction.x=ray2.x-ray1.x; direction.y=ray2.y-ray1.y; direction.z=ray2.z-ray1.z; e1.x =trip2.x-trip1.x; e1.y =trip2.y-trip1.y; e1.z =trip2.z-trip1.z; e2.x =trip3.x-trip1.x; e2.y =trip3.y-trip1.y; e2.z =trip3.z-trip1.z; vec3d q=cross(direction,e2); double a=dot(e1,q); if ((a>-EPSILON) & (a<EPSILON)){ // the vector is parallel to the plane (the intersection is at infinity) return 0.0f; } double f=1/a; vec3d s; s.x=ray1.x-trip1.x; s.y=ray1.y-trip1.y; s.z=ray1.z-trip1.z; double u=f*dot(s,q); if (u<0.0-safetyEpsilon){ // the intersection is outside of the triangle return 0.0f; } vec3d r=cross(s,e1); double v= f*dot(direction,r); if (v<0.0-safetyEpsilon || (u+v)>1.0+safetyEpsilon){ // the intersection is outside of the triangle return 0.0; } return f*dot(e2,r); } /************************************************************************** ***************************Tetra-line intersection************************ *************************************************************************/ __device__ __inline__ bool tetraLineIntersect(const unsigned long *elements,const float *vertices, const float3 ray1, const float3 ray2, const unsigned long elementId,float *t,bool computelenght,const float safetyEpsilon){ unsigned long auxNodeId[4]; auxNodeId[0]=elements[elementId*4+0]; auxNodeId[1]=elements[elementId*4+1]; auxNodeId[2]=elements[elementId*4+2]; auxNodeId[3]=elements[elementId*4+3]; vec3d triN1,triN2,triN3; float l1,l2,l3,l4; /////////////////////////////////////////////////////////////////////// // As modular arithmetic is bad on GPUs (flop-wise), I manually unroll the loop //for (int i=0;i<4;i++) /////////////////////////////////////////////////////////////////////// // Triangle triN1.x=vertices[auxNodeId[0]*3+0]; 
triN1.y=vertices[auxNodeId[0]*3+1]; triN1.z=vertices[auxNodeId[0]*3+2]; triN2.x=vertices[auxNodeId[1]*3+0]; triN2.y=vertices[auxNodeId[1]*3+1]; triN2.z=vertices[auxNodeId[1]*3+2]; triN3.x=vertices[auxNodeId[2]*3+0]; triN3.y=vertices[auxNodeId[2]*3+1]; triN3.z=vertices[auxNodeId[2]*3+2]; //compute l1=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon); // Triangle triN1.x=vertices[auxNodeId[0]*3+0]; triN1.y=vertices[auxNodeId[0]*3+1]; triN1.z=vertices[auxNodeId[0]*3+2]; triN2.x=vertices[auxNodeId[1]*3+0]; triN2.y=vertices[auxNodeId[1]*3+1]; triN2.z=vertices[auxNodeId[1]*3+2]; triN3.x=vertices[auxNodeId[3]*3+0]; triN3.y=vertices[auxNodeId[3]*3+1]; triN3.z=vertices[auxNodeId[3]*3+2]; //compute l2=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon); // Triangle triN1.x=vertices[auxNodeId[0]*3+0]; triN1.y=vertices[auxNodeId[0]*3+1]; triN1.z=vertices[auxNodeId[0]*3+2]; triN2.x=vertices[auxNodeId[2]*3+0]; triN2.y=vertices[auxNodeId[2]*3+1]; triN2.z=vertices[auxNodeId[2]*3+2]; triN3.x=vertices[auxNodeId[3]*3+0]; triN3.y=vertices[auxNodeId[3]*3+1]; triN3.z=vertices[auxNodeId[3]*3+2]; //compute l3=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon); // Triangle triN1.x=vertices[auxNodeId[1]*3+0]; triN1.y=vertices[auxNodeId[1]*3+1]; triN1.z=vertices[auxNodeId[1]*3+2]; triN2.x=vertices[auxNodeId[2]*3+0]; triN2.y=vertices[auxNodeId[2]*3+1]; triN2.z=vertices[auxNodeId[2]*3+2]; triN3.x=vertices[auxNodeId[3]*3+0]; triN3.y=vertices[auxNodeId[3]*3+1]; triN3.z=vertices[auxNodeId[3]*3+2]; //compute l4=moller_trumbore(ray1,ray2,triN1,triN2,triN3,safetyEpsilon); //dump if ((l1==0.0)&&(l2==0.0)&&(l3==0.0)&&(l4==0.0)){ t[0]=0.0;t[1]=0.0;t[2]=0.0;t[3]=0.0; return false; }else{ t[0]=l1;t[1]=l2;t[2]=l3;t[3]=l4; // find which one is the intersection return true; } } /************************************************************************** ***************************Intersection between line-box****************** *************************************************************************/ __device__ bool rayBoxIntersect(const float3 ray1, const float3 ray2,const float3 nodemin, const float3 nodemax){ float3 direction; direction.x=ray2.x-ray1.x; direction.y=ray2.y-ray1.y; direction.z=ray2.z-ray1.z; float tmin,tymin,tzmin; float tmax,tymax,tzmax; if (direction.x >= 0){ tmin = (nodemin.x - ray1.x) / direction.x; tmax = (nodemax.x - ray1.x) / direction.x; }else{ tmin = (nodemax.x - ray1.x) / direction.x; tmax = (nodemin.x - ray1.x) / direction.x; } if (direction.y >= 0){ tymin = (nodemin.y - ray1.y) / direction.y; tymax = (nodemax.y - ray1.y) / direction.y; }else{ tymin = (nodemax.y - ray1.y) / direction.y; tymax = (nodemin.y - ray1.y) / direction.y; } if ( (tmin > tymax) || (tymin > tmax) ){ return false; } if (tymin > tmin){ tmin = tymin; } if (tymax < tmax){ tmax = tymax; } if (direction.z >= 0){ tzmin = (nodemin.z - ray1.z) / direction.z; tzmax = (nodemax.z - ray1.z) / direction.z; }else{ tzmin = (nodemax.z - ray1.z) / direction.z; tzmax = (nodemin.z - ray1.z) / direction.z; } if ((tmin > tzmax) || (tzmin > tmax)){ return false; } // If we wanted the ts as output //// // if (tzmin > tmin){ // tmin = tzmin; // } // // if (tzmax < tmax){ // tmax = tzmax; // } //// return true; } /************************************************************************** ******Fucntion to detect the first triangle to expand the graph*********** *************************************************************************/ template <int tree_depth> __global__ void initXrays(const unsigned long* elements, const float* 
        vertices, const unsigned long *boundary,const unsigned long nboundary,
        float * d_res, Geometry geo,
        const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,
        const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree)
{
    // Depth first R-tree search
    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx =  x  * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    long crossingID=-1;
    //    int* path=(int*)malloc(tree_depth*sizeof(int));
    //    int* n_checked=(int*)malloc(tree_depth*sizeof(int));
    int path[tree_depth];
    int n_checked[tree_depth];
#pragma unroll
    for (int i=0;i<tree_depth;i++)
        n_checked[i]=0;

    float safetyEpsilon=0.0000001f;
    float t[4];
    float taux, tmin=1;
    int depth=0;
    // Compute detector position
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);

    float3 nodemin,nodemax;
    nodemax.x=(float)bin_box[root*6+0];
    nodemax.y=(float)bin_box[root*6+1];
    nodemax.z=(float)bin_box[root*6+2];
    nodemin.x=(float)bin_box[root*6+3];
    nodemin.y=(float)bin_box[root*6+4];
    nodemin.z=(float)bin_box[root*6+5];
    bool isinbox=rayBoxIntersect(source, det, nodemin,nodemax);
    if (!isinbox){
        d_res[idx]=-1.0f;
        return;
    }

    bool finished=false;
    int next_node;
    // We know the ray intersects the root box, so let's start from the first node.
    depth=0;
    path[0]=root;
    n_checked[0]=0;
    //    path[depth]=(int)bin_elements[root*(M+1)+0];
    crossingID=-1;
    int iter=0;
    while (!finished){
        iter++;
        // If every child of the current node has already been checked, go up one node
        // (guarding against stepping above the root while backtracking).
        while(depth>=0 && (long)n_checked[depth]>=(long)bin_n_elements[path[depth]]){
            depth--;
        }
        if (depth<0){
            finished=true;
            d_res[idx]=(float)crossingID;
            return;
        }
        next_node=bin_elements[path[depth]*(M+1)+n_checked[depth]];
        //get bounding box
        nodemax.x=bin_box[next_node*6+0];
        nodemax.y=bin_box[next_node*6+1];
        nodemax.z=bin_box[next_node*6+2];
        nodemin.x=bin_box[next_node*6+3];
        nodemin.y=bin_box[next_node*6+4];
        nodemin.z=bin_box[next_node*6+5];

        isinbox=rayBoxIntersect(source, det, nodemin,nodemax);
        // count that we checked it already
        n_checked[depth]++;

        if (isinbox){
            if(!isleaf[next_node]){
                // if it's not a leaf, then we just go deeper
                depth++;
                n_checked[depth]=0;  // make sure prior values do not interfere; going deeper means it's the first time on this node.
                path[depth]=next_node;
            }else{
                // if it's a leaf, we should check the triangles
                for(unsigned int i=0;i<bin_n_elements[next_node];i++){
                    // check all triangles, obtain smallest t
                    tetraLineIntersect(elements,vertices,source,det,boundary[bin_elements[next_node*(M+1)+i]],t,true,safetyEpsilon);
                    // if there is an intersection
                    if ((t[0]+t[1]+t[2]+t[3])!=0){
                        taux=min4nz(t);
                        if (taux<tmin){
                            tmin=taux;
                            crossingID=bin_elements[next_node*(M+1)+i];
                        }
                    }
                }//endfor
            }
        }//end isinbox
        // If it's not inside, then we just loop again and check the next one.
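        // Traversal invariants of this depth-first search, for reference:
        //   path[d]      = R-tree node visited at depth d on the current branch
        //   n_checked[d] = index of the next child of path[d] to test
        // Backtracking happens in the inner while loop above: once every child
        // of path[depth] has been tested, depth is decremented; reaching
        // depth < 0 means the whole tree has been exhausted and the best
        // crossingID found so far (or -1 for no hit) is written out.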
} } template __global__ void initXrays<2>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<4>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<6>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<8>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<10>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<12>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); template __global__ void initXrays<14>(const unsigned long* elements, const float* vertices,const unsigned long *boundary,const unsigned long nboundary,float * d_res, Geometry geo,const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree); /************************************************************************** ******Fucntion to detect the first triangle to expand the graph*********** *************************************************************************/ /////////////////////////////////////////////////////////////////////////// ///////////////////////// NOT USED ////////////////////////// /////////////////////////////////////////////////////////////////////////// __global__ void initXraysBrute(const unsigned long* 
        elements, const float* vertices,
        const unsigned long *boundary,const unsigned long nboundary,
        float * d_res, Geometry geo,
        const float3 source,const float3 deltaU,const float3 deltaV,const float3 uvOrigin,const float3 nodemin,const float3 nodemax)
{
    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx =  x  * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    // Compute detector position
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);

    // Should we even try? If the ray does not cross the boundary, don't try.
    bool crossBound=rayBoxIntersect(source, det, nodemin,nodemax);
    if (!crossBound){
        d_res[idx]=-1.0f;
        return;
    }
    // Check intersection with all elements in the boundary
    unsigned long notintersect=nboundary;
    float t[4];
    float t1,tinter=10000.0f;
    float safetyEpsilon=0.0000001f;
    unsigned long crossingID=0;
    //Check with all elements, and keep the one that gives the lowest parameter
    while(notintersect==nboundary){
        notintersect=0;
        for(unsigned long i=0 ;i<nboundary;i++){
            tetraLineIntersect(elements,vertices,source,det,boundary[i],t,true,safetyEpsilon);
            if (nnz(t)==0){
                notintersect++;
            }else{
                t1=min4nz(t);
                if (t1<tinter){
                    tinter=t1;
                    crossingID=i;
                }
            }
        }
        safetyEpsilon=safetyEpsilon*10;
    }
    d_res[idx]=(float)crossingID;
}

/**************************************************************************
 ******************The main projection function ***************************
 *************************************************************************/
__global__ void graphProject(const unsigned long *elements, const float *vertices,const unsigned long *boundary,const long *neighbours, const float * d_image, float * d_res, Geometry geo, float3 source, float3 deltaU, float3 deltaV, float3 uvOrigin){

    unsigned long y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned long x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long idx =  x  * geo.nDetecV + y;
    if ((x>= geo.nDetecU) || (y>= geo.nDetecV))
        return;

    unsigned int pixelV =(unsigned int)geo.nDetecV- y-1;
    unsigned int pixelU =(unsigned int) x;

    // Read initial position. Generate auxiliary variables for element tracking
    long current_element=(long)d_res[idx];
    long previous_element;
    long aux_element;
    // Get the coordinates of the detector for this kernel
    float3 det;
    det.x=(uvOrigin.x+pixelU*deltaU.x+pixelV*deltaV.x);
    det.y=(uvOrigin.y+pixelU*deltaU.y+pixelV*deltaV.y);
    det.z=(uvOrigin.z+pixelU*deltaU.z+pixelV*deltaV.z);
    // If the current element is "none", then we are done; the ray is not intersecting the mesh
    if (current_element<0){
        //no need to do stuff
        d_res[idx]=0.0f;
        return;
    }
    // initialize variables for the lengths and result
    float result=0.0f;
    float length,t1,t2;
    float t[4];
    int indM;
    bool isIntersect;
    // Let's compute the first intersection outside the main loop.
    // The structure of this loop has to be identical to the one in initXrays() or
    // there is a risk of not getting the same floating point value bit by bit.
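    // What follows is a tolerance-escalation retry: the intersection test is
    // first run with a zero tolerance, and if floating point error makes the
    // ray miss the faces of an element it provably passes through, the
    // acceptable parameter range is widened (safeEpsilon *= 10) until at
    // least two faces report a hit.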
    float safeEpsilon=0.00001f;
    isIntersect=tetraLineIntersect(elements,vertices,source,det,boundary[current_element],t,true,0.0f);
    while(!isIntersect){
        isIntersect=tetraLineIntersect(elements,vertices,source,det,boundary[current_element],t,true,safeEpsilon);
        if (nnz(t)<=1){
            isIntersect=false;
            safeEpsilon*=10;
        }
    }
    // Reset the safety variable
    safeEpsilon=0.00001f;
    // Find the maximum and minimum non-zero intersection parameters
    t2=max4(t,&indM);
    t1=min4nz(t);
    // Let's get the ray (direction) and the current intersection length.
    float3 direction,p1,p2;
    direction.x=det.x-source.x;
    direction.y=det.y-source.y;
    direction.z=det.z-source.z;
    p2.x=direction.x* (t2);
    p2.y=direction.y* (t2);
    p2.z=direction.z* (t2);
    p1.x=direction.x* (t1);
    p1.y=direction.y* (t1);
    p1.z=direction.z* (t1);
    length=sqrt((p2.x-p1.x)*(p2.x-p1.x)+(p2.y-p1.y)*(p2.y-p1.y)+(p2.z-p1.z)*(p2.z-p1.z));
    // Start accumulating the result
    result=d_image[boundary[current_element]]*length;

    // If t1 and t2 are the same, we need to make sure that the one we choose as
    // t2 (the one that will lead us to the next element) is the correct one.
    // Otherwise we will go out of the image, and the code will end.
    // This piece of code makes sure that is checked, and swaps them otherwise.
    if(t1==t2){
        aux_element=neighbours[boundary[current_element]*4+indM];
        if(aux_element==-1){
            int auxind;
            for(int i=0;i<4;i++){
                if(indM!=i && t[i]==t1){
                    auxind=i;
                }
            }
            indM=auxind;
        }
    }
    // Grab the index of the next element and save the current one for further checking
    previous_element=boundary[current_element];
    current_element=neighbours[boundary[current_element]*4+indM];
    // if it's "none" then that's it, we are done.
    if (current_element==-1){
        d_res[idx]=result;
        return;
    }

    float sumt;
    unsigned long c=0;
    bool noNeighbours=false;
    while(!noNeighbours && c<5000){ // RANDOM safe distance, change to something sensible
        // c is a counter to avoid infinite loops
        c++;
        // Check intersections. We know this one is intersected (because it shares a face with the previous one that was intersected).
        isIntersect=tetraLineIntersect(elements,vertices,source,det,(unsigned int)current_element,t,true,0.0f);
        while(!isIntersect){
            // If the intersection failed, then let's slightly increase the size of the triangles
            // (not really: we increase the bounds of acceptable intersection values).
            // We can do it without a safety check because we already know it must happen.
            isIntersect=tetraLineIntersect(elements,vertices,source,det,(unsigned int)current_element,t,true,safeEpsilon);
            if (nnz(t)<=1){
                isIntersect=false;
                safeEpsilon*=10;
            }
        }
        safeEpsilon=0.00001f;
        // Find the maximum and minimum non-zero intersection parameters
        t2=max4(t,&indM);
        t1=min4nz(t);
        // if they are very similar, just treat them as if they were the same.
        // This was necessary in a previous version; it's left here just in case it's needed again.
        //////
        //        if (fabsf(t2-t1)<0.00000001){
        //            t2=t1;
        //            t[indM]=t1;
        //        }
        //////
        // Are they all zero?
        sumt=t[0]+t[1]+t[2]+t[3];
        if (sumt!=0.0){
            // compute intersection length and update the result integral
            p2.x=direction.x* (t2);
            p2.y=direction.y* (t2);
            p2.z=direction.z* (t2);
            p1.x=direction.x* (t1);
            p1.y=direction.y* (t1);
            p1.z=direction.z* (t1);
            length=sqrt((p2.x-p1.x)*(p2.x-p1.x)+(p2.y-p1.y)*(p2.y-p1.y)+(p2.z-p1.z)*(p2.z-p1.z));
            result+=d_image[current_element]*length;

            // Now let's make sure we can find the next element correctly.
            // If t1 and t2 are the same, we need to make sure that the one we choose as
            // t2 (the one that will lead us to the next element) is the correct one.
            // Otherwise we will go backwards and get trapped in an infinite loop.
            // This piece of code makes sure this does not happen.
            if(t1==t2){
                aux_element=neighbours[current_element*4+indM];
                if(aux_element==previous_element){
                    int auxind;
                    for(int i=0;i<4;i++){
                        if(indM!=i && t[i]==t1){
                            auxind=i;
                        }
                    }
                    indM=auxind;
                }
            }
            // Update the elements
            previous_element=current_element;
            current_element=neighbours[current_element*4+indM];
            // If we are out, then that's it, we are done.
            if (current_element==-1){
                d_res[idx]=result;
                return;
            }
            continue;
        }
        // If there was no intersection, then we are out. Can this even happen?
        noNeighbours=true;
    }//endwhile
    // It should never get here, ever.
    d_res[idx]=-1.0;
    return;
}
/**************************************************************************
 *********************** Main function ************************************
 *************************************************************************/
void graphForwardRay(float const * const image, Geometry geo,
        const double * angles,const unsigned int nangles,
        const float* nodes,const unsigned long nnodes,
        const unsigned long* elements,const unsigned long nelements,
        const long* neighbours,const unsigned long nneighbours,
        const unsigned long* boundary,const unsigned long nboundary,
        const int* bin_n_elements,const long* bin_elements,const double* bin_box,const int M,const int m,const double* MBR,const bool* isleaf,const long root,const long length_tree,const long tree_depth,
        float ** result)
{
    // Prepare for MultiGPU
    int deviceCount = 0;
    gpuErrchk(cudaGetDeviceCount(&deviceCount));
    if (deviceCount == 0)
    {
        mexErrMsgIdAndTxt("TriangleCT:graphForward:GPUselect","There are no available device(s) that support CUDA\n");
    }
    //
    // CODE assumes
    // 1.-All available devices are usable by this code
    // 2.-All available devices are equal, they are the same machine (warning thrown)
    unsigned int dev;
    char * devicenames;
    cudaDeviceProp deviceProp;

    for (dev = 0; dev < deviceCount; dev++) {
        cudaSetDevice(dev);
        cudaGetDeviceProperties(&deviceProp, dev);
        if (dev>0){
            if (strcmp(devicenames,deviceProp.name)!=0){
                mexWarnMsgIdAndTxt("TriangleCT:graphForward:GPUselect","Detected one (or more) different GPUs.\n This code is not smart enough to separate the memory GPU-wise if they have different computational times or memory limits.\n First GPU parameters used. If the code errors you might need to change the way GPU selection is performed. 
\n graph_ray_projection.cu line 526.");
                break;
            }
        }
        devicenames=deviceProp.name;
    }
    cudaSetDevice(0);
    cudaGetDeviceProperties(&deviceProp, 0);
    unsigned long long mem_GPU_global=(unsigned long long)(deviceProp.totalGlobalMem*0.9);

    // This is the mandatory memory that we need to broadcast to all GPUs
    size_t num_bytes_img        = nelements*sizeof(float);
    size_t num_bytes_nodes      = nnodes*3*sizeof(float);
    size_t num_bytes_elements   = nelements*4*sizeof(unsigned long);
    size_t num_bytes_neighbours = nneighbours*4*sizeof(long);
    size_t num_bytes_boundary   = nboundary*sizeof(unsigned long);
    // R-tree
    size_t num_bytes_bin_n_elements = length_tree*sizeof(int);
    size_t num_bytes_bin_elements   = length_tree*(M+1)*sizeof(long);
    size_t num_bytes_bin_box        = 6*length_tree*sizeof(double);
    size_t num_bytes_MBR            = 6*nboundary*sizeof(double);
    size_t num_bytes_isleaf         = length_tree*sizeof(bool);

    unsigned long long mem_needed_graph=num_bytes_img+num_bytes_nodes+num_bytes_elements+num_bytes_neighbours+num_bytes_boundary;
    unsigned long long mem_free_GPU=mem_GPU_global-mem_needed_graph;
    //    mexPrintf(" num_bytes_img %llu \n", num_bytes_img );
    //    mexPrintf("num_bytes_nodes %llu \n", num_bytes_nodes );
    //    mexPrintf("num_bytes_elements %llu \n", num_bytes_elements);
    //    mexPrintf("num_bytes_neighbours %llu \n", num_bytes_neighbours );
    //    mexPrintf("num_bytes_boundary %llu \n", num_bytes_boundary);
    //    mexPrintf("num_bytes_needed %llu \n", mem_needed_graph);
    //    mexPrintf("num_bytes_GPU %llu \n", mem_GPU_global );
    size_t num_bytes_proj = geo.nDetecU*geo.nDetecV * sizeof(float);

    if (mem_needed_graph>mem_GPU_global)
        mexErrMsgIdAndTxt("TriangleCT:graphForward:Memory","The entire mesh does not fit on the GPU \n");
    if (num_bytes_proj>mem_free_GPU)
        mexErrMsgIdAndTxt("TriangleCT:graphForward:Memory","The entire mesh + attenuation values + 2 projections do not fit on a GPU.\n Dividing the projections is not supported \n");

    float time;
    float timecopy=0, timekernel=0,timeaux;
    cudaEvent_t start, stop;
    if (DEBUG_TIME){
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
    }
    //result
    float ** d_res= (float **)malloc(deviceCount*sizeof(float*));
    // FE structured graph
    float** d_image=(float **)malloc(deviceCount*sizeof(float*));
    float** d_nodes=(float **)malloc(deviceCount*sizeof(float*));
    unsigned long** d_elements=(unsigned long **)malloc(deviceCount*sizeof(unsigned long*));
    long ** d_neighbours=( long **)malloc(deviceCount*sizeof(long*));
    unsigned long** d_boundary=(unsigned long **)malloc(deviceCount*sizeof(unsigned long*));
    // R-tree vars
    int** d_bin_n_elements=(int **)malloc(deviceCount*sizeof(int*));
    long** d_bin_elements= (long**)malloc(deviceCount*sizeof(long*));
    double** d_bin_box=(double**)malloc(deviceCount*sizeof(double*));
    double** d_MBR=(double**)malloc(deviceCount*sizeof(double*));
    bool** d_isleaf=(bool**)malloc(deviceCount*sizeof(bool*));

    //start allocation
    for (dev = 0; dev < deviceCount; dev++) {
        cudaSetDevice(dev);
        // First send all the relevant data to CUDA, and allocate enough memory for the result
        gpuErrchk(cudaMalloc((void **)&d_res[dev],num_bytes_proj));

        gpuErrchk(cudaMalloc((void **)&d_image[dev],num_bytes_img));
        gpuErrchk(cudaMemcpyAsync(d_image[dev],image,num_bytes_img,cudaMemcpyHostToDevice));

        gpuErrchk(cudaMalloc((void **)&d_nodes[dev],num_bytes_nodes));
        gpuErrchk(cudaMemcpyAsync(d_nodes[dev],nodes,num_bytes_nodes,cudaMemcpyHostToDevice));

        gpuErrchk(cudaMalloc((void **)&d_elements[dev],num_bytes_elements));
        gpuErrchk(cudaMemcpyAsync(d_elements[dev],elements,num_bytes_elements,cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void **)&d_neighbours[dev],num_bytes_neighbours)); gpuErrchk(cudaMemcpyAsync(d_neighbours[dev],neighbours,num_bytes_neighbours,cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_boundary[dev],num_bytes_boundary)); gpuErrchk(cudaMemcpyAsync(d_boundary[dev],boundary,num_bytes_boundary,cudaMemcpyHostToDevice)); // Now all the R-tree stuff gpuErrchk(cudaMalloc((void **)&d_bin_n_elements[dev],num_bytes_bin_n_elements)); gpuErrchk(cudaMemcpyAsync(d_bin_n_elements[dev],bin_n_elements,num_bytes_bin_n_elements,cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_bin_elements[dev],num_bytes_bin_elements)); gpuErrchk(cudaMemcpyAsync(d_bin_elements[dev],bin_elements,num_bytes_bin_elements,cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_bin_box[dev],num_bytes_bin_box)); gpuErrchk(cudaMemcpyAsync(d_bin_box[dev],bin_box,num_bytes_bin_box,cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_MBR[dev],num_bytes_MBR)); gpuErrchk(cudaMemcpyAsync(d_MBR[dev],MBR,num_bytes_MBR,cudaMemcpyHostToDevice)); gpuErrchk(cudaMalloc((void **)&d_isleaf[dev],num_bytes_isleaf)); gpuErrchk(cudaMemcpyAsync(d_isleaf[dev],isleaf,num_bytes_isleaf,cudaMemcpyHostToDevice)); } if (DEBUG_TIME){ cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); mexPrintf("Time to memcpy: %3.1f ms \n", time); } gpuErrchk(cudaDeviceSynchronize()); // KERNEL TIME! int divU,divV; divU=8; divV=8; dim3 grid((geo.nDetecU+divU-1)/divU,(geo.nDetecV+divV-1)/divV,1); dim3 block(divU,divV,1); float3 deltaU, deltaV, uvOrigin; float3 source; for (unsigned int i=0;i<nangles;i+=(unsigned int)deviceCount){ for (dev = 0; dev < deviceCount; dev++){ if (i+dev >=nangles){ //mexWarnMsgIdAndTxt("TriangleCT:graphBackward:GPUselect"," i+dev >=nangles \n"); break; } geo.alpha=angles[(i+dev)*3]; geo.theta=angles[(i+dev)*3+1]; geo.psi =angles[(i+dev)*3+2]; //dev=i%deviceCount; //dev=0; computeGeometricParams(geo, &source,&deltaU, &deltaV,&uvOrigin,i+dev); //gpuErrchk(cudaDeviceSynchronize()); cudaSetDevice(dev); if (DEBUG_TIME){ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); } switch ((int)((tree_depth + 2 - 1) / 2) * 2){ case 2: initXrays<2><<<grid,block >>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 4: initXrays<4><<<grid,block >>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 6: initXrays<6><<<grid,block >>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 8: initXrays<8><<<grid,block >>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 10: initXrays<10><<<grid,block >>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary, d_res[dev], geo, source,deltaU, deltaV,uvOrigin, d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree); break; case 12: initXrays<12><<<grid,block 
>>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary,
                        d_res[dev], geo, source,deltaU, deltaV,uvOrigin,
                        d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree);
                break;
            case 14:
                initXrays<14><<<grid,block>>>(d_elements[dev],d_nodes[dev],d_boundary[dev],nboundary,
                        d_res[dev], geo, source,deltaU, deltaV,uvOrigin,
                        d_bin_n_elements[dev],d_bin_elements[dev],d_bin_box[dev],M,m,d_MBR[dev],d_isleaf[dev],root,length_tree);
                break;
            default:
                mexErrMsgIdAndTxt("MEX:graph_ray_projections","R*-Tree is too deep (more than 14)");
                break;
        }

        if (DEBUG_TIME){
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&time, start, stop);
            mexPrintf("Time to Init Kernel: %3.1f ms \n", time);
        }
        if (DEBUG_TIME){
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start, 0);
        }

        gpuErrchk(cudaDeviceSynchronize());
        graphProject<<<grid,block>>>(d_elements[dev],d_nodes[dev],d_boundary[dev],d_neighbours[dev],d_image[dev],d_res[dev], geo,source,deltaU,deltaV,uvOrigin);
        gpuErrchk(cudaDeviceSynchronize());

        if (DEBUG_TIME){
            cudaEventRecord(stop, 0);
            cudaEventSynchronize(stop);
            cudaEventElapsedTime(&time, start, stop);
            mexPrintf("Time to proj Kernel: %3.1f ms \n", time);
        }
    }
    for (dev = 0; dev < deviceCount; dev++){
        //gpuErrchk(cudaDeviceSynchronize());
        cudaSetDevice(dev);
        gpuErrchk(cudaMemcpyAsync(result[i+dev], d_res[dev], num_bytes_proj, cudaMemcpyDeviceToHost));
    }
}
//    if (DEBUG_TIME){
//        mexPrintf("Time of Kernel: %3.1f ms \n", timekernel);
//        mexPrintf("Time of memcpy to Host: %3.1f ms \n", timecopy);
//    }
if (DEBUG_TIME){
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
}
// cudaGraphFree(&tempHostGraph,&tempHostElement,&tempHostNode);
for (dev = 0; dev < deviceCount; dev++) {
    cudaSetDevice(dev);
    cudaFree(d_res[dev]);
    cudaFree(d_image[dev]);
    cudaFree(d_nodes[dev]);
    cudaFree(d_neighbours[dev]);
    cudaFree(d_elements[dev]);
    cudaFree(d_boundary[dev]);
    //R-tree stuff
    cudaFree(d_bin_n_elements[dev]);
    cudaFree(d_bin_elements[dev]);
    cudaFree(d_bin_box[dev]);
    cudaFree(d_MBR[dev]);
    cudaFree(d_isleaf[dev]);
}
if (DEBUG_TIME){
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    mexPrintf("Time to free: %3.1f ms \n", time);
}
cudaDeviceReset();
return;
}

void reduceNodes(float *d_nodes, unsigned long nnodes, float* max, float* min){
    int divU;
    divU=MAXTHREADS;
    dim3 grid((nnodes+divU-1)/divU,1,1);
    dim3 block(divU,1,1);
    //auxiliary for reduction
    float* d_auxmax,*d_auxmin;
    gpuErrchk(cudaMalloc((void **)&d_auxmax, sizeof(float)*(nnodes + MAXTHREADS - 1) / MAXTHREADS));
    gpuErrchk(cudaMalloc((void **)&d_auxmin, sizeof(float)*(nnodes + MAXTHREADS - 1) / MAXTHREADS));
    //gpuErrchk(cudaMalloc((void **)&debugreduce,MAXTHREADS*sizeof(float)));
    float** getFinalreducesmin=(float**)malloc(3*sizeof(float*));
    float** getFinalreducesmax=(float**)malloc(3*sizeof(float*));
    for (unsigned int i=0; i<3; i++){
        getFinalreducesmin[i]=(float*)malloc(MAXTHREADS*sizeof(float));
        getFinalreducesmax[i]=(float*)malloc(MAXTHREADS*sizeof(float));
    }
    // for X,Y,Z
    for (unsigned int i=0; i<3; i++){
        maxReduceOffset<<<grid, block, MAXTHREADS*sizeof(float)>>>(d_nodes, d_auxmax, nnodes,i);
        minReduceOffset<<<grid, block, MAXTHREADS*sizeof(float)>>>(d_nodes, d_auxmin, nnodes,i);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        if (grid.x > 1) {
            // There should be another reduce here, but the reduce code above strides over
            // every 3rd value, so it cannot be reused as-is.
            // The most efficient alternative is to do the final reduce (<1024 values) on the
            // CPU, therefore avoiding a deep copy. We could also rewrite the reduce, but it's
            // not worth the time now (D:).
            gpuErrchk(cudaMemcpy( getFinalreducesmin[i], d_auxmin,grid.x*sizeof(float), cudaMemcpyDeviceToHost));
            gpuErrchk(cudaMemcpy( getFinalreducesmax[i], d_auxmax,grid.x*sizeof(float), cudaMemcpyDeviceToHost));
            // Seed with the first partial result, then fold in the rest.
            max[i]=getFinalreducesmax[i][0];
            min[i]=getFinalreducesmin[i][0];
            for (unsigned int j=1;j<grid.x;j++){
                max[i]=( getFinalreducesmax[i][j]>max[i])? getFinalreducesmax[i][j]:max[i];
                min[i]=( getFinalreducesmin[i][j]<min[i])? getFinalreducesmin[i][j]:min[i];
            }
        } else {
            gpuErrchk(cudaMemcpy(&max[i], d_auxmax, sizeof(float), cudaMemcpyDeviceToHost));
            gpuErrchk(cudaMemcpy(&min[i], d_auxmin, sizeof(float), cudaMemcpyDeviceToHost));
        }
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
    }
    // Release host-side scratch buffers as well as the device ones.
    for (unsigned int i=0; i<3; i++){
        free(getFinalreducesmin[i]);
        free(getFinalreducesmax[i]);
    }
    free(getFinalreducesmin);
    free(getFinalreducesmax);
    cudaFree(d_auxmax);
    cudaFree(d_auxmin);
}

// TODO: quite a lot of geometric transforms.
void computeGeometricParams(const Geometry geo,float3 * source, float3* deltaU, float3* deltaV, float3* originUV,unsigned int idxAngle){

    float3 auxOriginUV;
    float3 auxDeltaU;
    float3 auxDeltaV;
    auxOriginUV.x=-(geo.DSD[idxAngle]-geo.DSO[idxAngle]);
    // top left
    auxOriginUV.y=-geo.sDetecU/2+/*half a pixel*/geo.dDetecU/2;
    auxOriginUV.z=geo.sDetecV/2-/*half a pixel*/geo.dDetecV/2;

    //Offset of the detector
    auxOriginUV.y=auxOriginUV.y+geo.offDetecU[idxAngle];
    auxOriginUV.z=auxOriginUV.z+geo.offDetecV[idxAngle];

    // Change in U
    auxDeltaU.x=auxOriginUV.x;
    auxDeltaU.y=auxOriginUV.y+geo.dDetecU;
    auxDeltaU.z=auxOriginUV.z;
    //Change in V
    auxDeltaV.x=auxOriginUV.x;
    auxDeltaV.y=auxOriginUV.y;
    auxDeltaV.z=auxOriginUV.z-geo.dDetecV;

    float3 auxSource;
    auxSource.x=geo.DSO[idxAngle];
    auxSource.y=0;
    auxSource.z=0;

    // rotate around axis.
eulerZYZ(geo,&auxOriginUV); eulerZYZ(geo,&auxDeltaU); eulerZYZ(geo,&auxDeltaV); eulerZYZ(geo,&auxSource); // Offset image (instead of offseting image, -offset everything else) auxOriginUV.x =auxOriginUV.x-geo.offOrigX[idxAngle]; auxOriginUV.y =auxOriginUV.y-geo.offOrigY[idxAngle]; auxOriginUV.z =auxOriginUV.z-geo.offOrigZ[idxAngle]; auxDeltaU.x=auxDeltaU.x-geo.offOrigX[idxAngle]; auxDeltaU.y=auxDeltaU.y-geo.offOrigY[idxAngle]; auxDeltaU.z=auxDeltaU.z-geo.offOrigZ[idxAngle]; auxDeltaV.x=auxDeltaV.x-geo.offOrigX[idxAngle]; auxDeltaV.y=auxDeltaV.y-geo.offOrigY[idxAngle]; auxDeltaV.z=auxDeltaV.z-geo.offOrigZ[idxAngle]; auxSource.x=auxSource.x-geo.offOrigX[idxAngle]; auxSource.y=auxSource.y-geo.offOrigY[idxAngle]; auxSource.z=auxSource.z-geo.offOrigZ[idxAngle]; auxDeltaU.x=auxDeltaU.x-auxOriginUV.x; auxDeltaU.y=auxDeltaU.y-auxOriginUV.y; auxDeltaU.z=auxDeltaU.z-auxOriginUV.z; auxDeltaV.x=auxDeltaV.x-auxOriginUV.x; auxDeltaV.y=auxDeltaV.y-auxOriginUV.y; auxDeltaV.z=auxDeltaV.z-auxOriginUV.z; *originUV=auxOriginUV; *deltaU=auxDeltaU; *deltaV=auxDeltaV; *source=auxSource; return; } void eulerZYZ(Geometry geo, float3* point){ float3 auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=(+cos(geo.alpha)*cos(geo.theta)*cos(geo.psi)-sin(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-cos(geo.alpha)*cos(geo.theta)*sin(geo.psi)-sin(geo.alpha)*cos(geo.psi))*auxPoint.y+ cos(geo.alpha)*sin(geo.theta)*auxPoint.z; point->y=(+sin(geo.alpha)*cos(geo.theta)*cos(geo.psi)+cos(geo.alpha)*sin(geo.psi))*auxPoint.x+ (-sin(geo.alpha)*cos(geo.theta)*sin(geo.psi)+cos(geo.alpha)*cos(geo.psi))*auxPoint.y+ sin(geo.alpha)*sin(geo.theta)*auxPoint.z; point->z=-sin(geo.theta)*cos(geo.psi)*auxPoint.x+ sin(geo.theta)*sin(geo.psi)*auxPoint.y+ cos(geo.theta)*auxPoint.z; }
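// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original sources: a minimal host-side
// version of the slab method used by rayBoxIntersect() above, with
// hypothetical names (slabHit, slabDemo). The ray p(t) = o + t*d is clipped
// against the three pairs of axis-aligned planes; the three [t0, t1]
// intervals are intersected, and the ray misses the box iff the overlap is
// empty. Division by a zero direction component yields +/-inf, which the
// comparisons tolerate for rays that do not lie on a slab plane.
// ---------------------------------------------------------------------------
#include <algorithm>
#include <cstdio>
#include <utility>

static bool slabHit(const double o[3], const double d[3],
                    const double bmin[3], const double bmax[3]) {
    double tmin = -1e300, tmax = 1e300;
    for (int i = 0; i < 3; i++) {
        double inv = 1.0 / d[i];
        double t0 = (bmin[i] - o[i]) * inv;
        double t1 = (bmax[i] - o[i]) * inv;
        if (inv < 0.0) std::swap(t0, t1);     // keep t0 <= t1 per axis
        tmin = std::max(tmin, t0);
        tmax = std::min(tmax, t1);
        if (tmin > tmax) return false;        // interval overlap is empty
    }
    return true;
}

static void slabDemo() {
    // A ray along +X through the middle of the unit cube: expect a hit.
    const double o[3]    = {-2.0, 0.5, 0.5}, d[3] = {1.0, 0.0, 0.0};
    const double bmin[3] = {0.0, 0.0, 0.0},  bmax[3] = {1.0, 1.0, 1.0};
    std::printf("hit = %d\n", (int)slabHit(o, d, bmin, bmax));  // prints hit = 1
}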
68da26ea1e220cb6f753a024ccf125b4a3b12b20.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/repeat_strings.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/functional.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> namespace cudf { namespace strings { namespace detail { std::unique_ptr<string_scalar> repeat_string(string_scalar const& input, size_type repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (!input.is_valid(stream)) { return std::make_unique<string_scalar>("", false, stream, mr); } if (input.size() == 0 || repeat_times <= 0) { return std::make_unique<string_scalar>("", true, stream, mr); } if (repeat_times == 1) { return std::make_unique<string_scalar>(input, stream, mr); } CUDF_EXPECTS(input.size() <= std::numeric_limits<size_type>::max() / repeat_times, "The output string has size that exceeds the maximum allowed size."); auto const str_size = input.size(); auto const iter = thrust::make_counting_iterator(0); auto buff = rmm::device_buffer(repeat_times * input.size(), stream, mr); // Pull data from the input string into each byte of the output string. thrust::transform(rmm::exec_policy(stream), iter, iter + repeat_times * str_size, static_cast<char*>(buff.data()), [in_ptr = input.data(), str_size] __device__(const auto idx) { return in_ptr[idx % str_size]; }); return std::make_unique<string_scalar>(std::move(buff)); } namespace { /** * @brief Generate a strings column in which each row is an empty string or a null. * * The output strings column has the same bitmask as the input column. */ auto generate_empty_output(strings_column_view const& input, size_type strings_count, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto chars_column = create_chars_child_column(0, stream, mr); auto offsets_column = make_numeric_column( data_type{type_to_id<offset_type>()}, strings_count + 1, mask_state::UNALLOCATED, stream, mr); CUDA_TRY(hipMemsetAsync(offsets_column->mutable_view().template data<offset_type>(), 0, offsets_column->size() * sizeof(offset_type), stream.value())); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input.parent(), stream, mr)); } /** * @brief Functor to compute output string sizes and repeat the input strings. * * This functor is called only when `repeat_times > 0`. In addition, the total number of threads * running this functor is `repeat_times * strings_count` (instead of `string_count`) for maximizing * parallelism and better load-balancing. 
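 *
 * For example, with `strings_count == 3` and `repeat_times == 4`, the thread
 * with `idx == 7` works on `str_idx == idx / repeat_times == 1` and
 * `repeat_idx == idx % repeat_times == 3`, i.e. it copies the fourth
 * repetition of the second input string.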
*/ struct compute_size_and_repeat_fn { column_device_view const strings_dv; size_type const repeat_times; bool const has_nulls; offset_type* d_offsets{nullptr}; // If d_chars == nullptr: only compute sizes of the output strings. // If d_chars != nullptr: only repeat strings. char* d_chars{nullptr}; // `idx` will be in the range of [0, repeat_times * strings_count). __device__ void operator()(size_type const idx) const noexcept { auto const str_idx = idx / repeat_times; // value cycles in [0, string_count) auto const repeat_idx = idx % repeat_times; // value cycles in [0, repeat_times) auto const is_valid = !has_nulls || strings_dv.is_valid_nocheck(str_idx); if (!d_chars && repeat_idx == 0) { d_offsets[str_idx] = is_valid ? repeat_times * strings_dv.element<string_view>(str_idx).size_bytes() : 0; } // Each input string will be copied by `repeat_times` threads into the output string. if (d_chars && is_valid) { auto const d_str = strings_dv.element<string_view>(str_idx); auto const str_size = d_str.size_bytes(); if (str_size > 0) { auto const input_ptr = d_str.data(); auto const output_ptr = d_chars + d_offsets[str_idx] + repeat_idx * str_size; std::memcpy(output_ptr, input_ptr, str_size); } } } }; } // namespace std::unique_ptr<column> repeat_strings(strings_column_view const& input, size_type repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const strings_count = input.size(); if (strings_count == 0) { return make_empty_column(type_id::STRING); } if (repeat_times <= 0) { // If the number of repetitions is not positive, each row of the output strings column will be // either an empty string (if the input row is not null), or a null (if the input row is null). return generate_empty_output(input, strings_count, stream, mr); } // If `repeat_times == 1`, just make a copy of the input. if (repeat_times == 1) { return std::make_unique<column>(input.parent(), stream, mr); } auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const fn = compute_size_and_repeat_fn{*strings_dv_ptr, repeat_times, input.has_nulls()}; auto [offsets_column, chars_column] = make_strings_children(fn, strings_count * repeat_times, strings_count, stream, mr); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input.parent(), stream, mr)); } namespace { /** * @brief Functor to compute string sizes and repeat the input strings, each string is repeated by a * separate number of times. */ template <class Iterator> struct compute_size_and_repeat_separately_fn { column_device_view const strings_dv; column_device_view const repeat_times_dv; Iterator const repeat_times_iter; bool const strings_has_nulls; bool const rtimes_has_nulls; offset_type* d_offsets{nullptr}; // If d_chars == nullptr: only compute sizes of the output strings. // If d_chars != nullptr: only repeat strings. char* d_chars{nullptr}; __device__ int64_t operator()(size_type const idx) const noexcept { auto const string_is_valid = !strings_has_nulls || strings_dv.is_valid_nocheck(idx); auto const rtimes_is_valid = !rtimes_has_nulls || repeat_times_dv.is_valid_nocheck(idx); // Any null input (either string or repeat_times value) will result in a null output. auto const is_valid = string_is_valid && rtimes_is_valid; // When the input string is null, `repeat_times` and `string_size` are also set to 0. 
// This makes sure that if `repeat_times > 0` then we will always have a valid input string, // and if `repeat_times <= 0` we will never copy anything to the output. auto const repeat_times = is_valid ? repeat_times_iter[idx] : size_type{0}; auto const string_size = is_valid ? strings_dv.element<string_view>(idx).size_bytes() : size_type{0}; // The output_size is returned, and it needs to be an int64_t number to prevent overflow. auto const output_size = repeat_times > 0 ? static_cast<int64_t>(repeat_times) * static_cast<int64_t>(string_size) : int64_t{0}; if (!d_chars) { // If overflow happen, the stored value of output string size will be incorrect due to // downcasting. In such cases, the entire output string size array should be discarded. d_offsets[idx] = static_cast<offset_type>(output_size); } else if (repeat_times > 0 && string_size > 0) { auto const d_str = strings_dv.element<string_view>(idx); auto const input_ptr = d_str.data(); auto output_ptr = d_chars + d_offsets[idx]; for (size_type repeat_idx = 0; repeat_idx < repeat_times; ++repeat_idx) { output_ptr = copy_and_increment(output_ptr, input_ptr, string_size); } } // The output_size value may be used to sum up to detect overflow at the caller site. // The caller can detect overflow easily by checking `SUM(output_size) > INT_MAX`. return output_size; } }; /** * @brief Creates child offsets and chars columns by applying the template function that * can be used for computing the output size of each string as well as create the output. * * This function is similar to `strings::detail::make_strings_children`, except that it accepts an * optional input `std::optional<column_view>` that can contain the precomputed sizes of the output * strings. */ template <typename Func> auto make_strings_children(Func fn, size_type exec_size, size_type strings_count, std::optional<column_view> output_strings_sizes, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto offsets_column = make_numeric_column( data_type{type_id::INT32}, strings_count + 1, mask_state::UNALLOCATED, stream, mr); auto offsets_view = offsets_column->mutable_view(); auto d_offsets = offsets_view.template data<size_type>(); fn.d_offsets = d_offsets; // This may be called twice -- once for offsets and once for chars. auto for_each_fn = [exec_size, stream](Func& fn) { thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), exec_size, fn); }; if (!output_strings_sizes.has_value()) { // Compute the output sizes only if they are not given. for_each_fn(fn); // Compute the offsets values. thrust::exclusive_scan( rmm::exec_policy(stream), d_offsets, d_offsets + strings_count + 1, d_offsets); } else { // Compute the offsets values from the provided output string sizes. auto const string_sizes = output_strings_sizes.value(); CUDA_TRY(hipMemsetAsync(d_offsets, 0, sizeof(offset_type), stream.value())); thrust::inclusive_scan(rmm::exec_policy(stream), string_sizes.template begin<size_type>(), string_sizes.template end<size_type>(), d_offsets + 1); } // Now build the chars column auto const bytes = cudf::detail::get_value<size_type>(offsets_view, strings_count, stream); auto chars_column = create_chars_child_column(bytes, stream, mr); // Execute the function fn again to fill the chars column. // Note that if the output chars column has zero size, the function fn should not be called to // avoid accidentally overwriting the offsets. 
if (bytes > 0) { fn.d_chars = chars_column->mutable_view().template data<char>(); for_each_fn(fn); } return std::make_pair(std::move(offsets_column), std::move(chars_column)); } } // namespace std::unique_ptr<column> repeat_strings(strings_column_view const& input, column_view const& repeat_times, std::optional<column_view> output_strings_sizes, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.size() == repeat_times.size(), "The input columns must have the same size."); CUDF_EXPECTS(cudf::is_index_type(repeat_times.type()), "repeat_strings expects an integer type for the `repeat_times` input column."); if (output_strings_sizes.has_value()) { auto const output_sizes = output_strings_sizes.value(); CUDF_EXPECTS(input.size() == output_sizes.size() && (!output_sizes.nullable() || !output_sizes.has_nulls()), "The given column of output string sizes is invalid."); } auto const strings_count = input.size(); if (strings_count == 0) { return make_empty_column(type_id::STRING); } auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const repeat_times_dv_ptr = column_device_view::create(repeat_times, stream); auto const strings_has_nulls = input.has_nulls(); auto const rtimes_has_nulls = repeat_times.has_nulls(); auto const repeat_times_iter = cudf::detail::indexalator_factory::make_input_iterator(repeat_times); auto const fn = compute_size_and_repeat_separately_fn<decltype(repeat_times_iter)>{ *strings_dv_ptr, *repeat_times_dv_ptr, repeat_times_iter, strings_has_nulls, rtimes_has_nulls}; auto [offsets_column, chars_column] = make_strings_children(fn, strings_count, strings_count, output_strings_sizes, stream, mr); // We generate new bitmask by AND of the input columns' bitmasks. // Note that if the input columns are nullable, the output column will also be nullable (which may // not have nulls). 
auto [null_mask, null_count] = cudf::detail::bitmask_and(table_view{{input.parent(), repeat_times}}, stream, mr); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask)); } std::pair<std::unique_ptr<column>, int64_t> repeat_strings_output_sizes( strings_column_view const& input, column_view const& repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.size() == repeat_times.size(), "The input columns must have the same size."); CUDF_EXPECTS( cudf::is_index_type(repeat_times.type()), "repeat_strings_output_sizes expects an integer type for the `repeat_times` input column."); auto const strings_count = input.size(); if (strings_count == 0) { return std::make_pair(make_empty_column(type_to_id<size_type>()), int64_t{0}); } auto output_sizes = make_numeric_column( data_type{type_to_id<size_type>()}, strings_count, mask_state::UNALLOCATED, stream, mr); auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const repeat_times_dv_ptr = column_device_view::create(repeat_times, stream); auto const strings_has_nulls = input.has_nulls(); auto const rtimes_has_nulls = repeat_times.has_nulls(); auto const repeat_times_iter = cudf::detail::indexalator_factory::make_input_iterator(repeat_times); auto const fn = compute_size_and_repeat_separately_fn<decltype(repeat_times_iter)>{ *strings_dv_ptr, *repeat_times_dv_ptr, repeat_times_iter, strings_has_nulls, rtimes_has_nulls, output_sizes->mutable_view().template begin<size_type>()}; auto const total_bytes = thrust::transform_reduce(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), fn, int64_t{0}, thrust::plus{}); return std::make_pair(std::move(output_sizes), total_bytes); } } // namespace detail std::unique_ptr<string_scalar> repeat_string(string_scalar const& input, size_type repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_string(input, repeat_times, rmm::cuda_stream_default, mr); } std::unique_ptr<column> repeat_strings(strings_column_view const& input, size_type repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings(input, repeat_times, rmm::cuda_stream_default, mr); } std::unique_ptr<column> repeat_strings(strings_column_view const& input, column_view const& repeat_times, std::optional<column_view> output_strings_sizes, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings( input, repeat_times, output_strings_sizes, rmm::cuda_stream_default, mr); } std::pair<std::unique_ptr<column>, int64_t> repeat_strings_output_sizes( strings_column_view const& input, column_view const& repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings_output_sizes(input, repeat_times, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
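// --- Usage sketch (illustrative; not part of the hipified source above) ----
// A minimal example of the fixed-count `repeat_strings` API implemented above.
// The wrapper function name `triple_each_row` is an assumption for
// demonstration, and the default memory-resource argument is assumed to be
// supplied by the public header declaration.
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/strings/strings_column_view.hpp>

std::unique_ptr<cudf::column> triple_each_row(cudf::strings_column_view const& input)
{
  // "ab" -> "ababab"; null rows stay null, while repeat_times <= 0 would
  // instead yield empty strings (see generate_empty_output above).
  return cudf::strings::repeat_strings(input, 3);
}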
68da26ea1e220cb6f753a024ccf125b4a3b12b20.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/indexalator.cuh> #include <cudf/detail/null_mask.hpp> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/utilities.cuh> #include <cudf/strings/repeat_strings.hpp> #include <cudf/strings/strings_column_view.hpp> #include <cudf/utilities/error.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/functional.h> #include <thrust/transform.h> #include <thrust/transform_reduce.h> namespace cudf { namespace strings { namespace detail { std::unique_ptr<string_scalar> repeat_string(string_scalar const& input, size_type repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { if (!input.is_valid(stream)) { return std::make_unique<string_scalar>("", false, stream, mr); } if (input.size() == 0 || repeat_times <= 0) { return std::make_unique<string_scalar>("", true, stream, mr); } if (repeat_times == 1) { return std::make_unique<string_scalar>(input, stream, mr); } CUDF_EXPECTS(input.size() <= std::numeric_limits<size_type>::max() / repeat_times, "The output string has size that exceeds the maximum allowed size."); auto const str_size = input.size(); auto const iter = thrust::make_counting_iterator(0); auto buff = rmm::device_buffer(repeat_times * input.size(), stream, mr); // Pull data from the input string into each byte of the output string. thrust::transform(rmm::exec_policy(stream), iter, iter + repeat_times * str_size, static_cast<char*>(buff.data()), [in_ptr = input.data(), str_size] __device__(const auto idx) { return in_ptr[idx % str_size]; }); return std::make_unique<string_scalar>(std::move(buff)); } namespace { /** * @brief Generate a strings column in which each row is an empty string or a null. * * The output strings column has the same bitmask as the input column. */ auto generate_empty_output(strings_column_view const& input, size_type strings_count, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto chars_column = create_chars_child_column(0, stream, mr); auto offsets_column = make_numeric_column( data_type{type_to_id<offset_type>()}, strings_count + 1, mask_state::UNALLOCATED, stream, mr); CUDA_TRY(cudaMemsetAsync(offsets_column->mutable_view().template data<offset_type>(), 0, offsets_column->size() * sizeof(offset_type), stream.value())); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input.parent(), stream, mr)); } /** * @brief Functor to compute output string sizes and repeat the input strings. * * This functor is called only when `repeat_times > 0`. In addition, the total number of threads * running this functor is `repeat_times * strings_count` (instead of `string_count`) for maximizing * parallelism and better load-balancing. 
*/ struct compute_size_and_repeat_fn { column_device_view const strings_dv; size_type const repeat_times; bool const has_nulls; offset_type* d_offsets{nullptr}; // If d_chars == nullptr: only compute sizes of the output strings. // If d_chars != nullptr: only repeat strings. char* d_chars{nullptr}; // `idx` will be in the range of [0, repeat_times * strings_count). __device__ void operator()(size_type const idx) const noexcept { auto const str_idx = idx / repeat_times; // value cycles in [0, string_count) auto const repeat_idx = idx % repeat_times; // value cycles in [0, repeat_times) auto const is_valid = !has_nulls || strings_dv.is_valid_nocheck(str_idx); if (!d_chars && repeat_idx == 0) { d_offsets[str_idx] = is_valid ? repeat_times * strings_dv.element<string_view>(str_idx).size_bytes() : 0; } // Each input string will be copied by `repeat_times` threads into the output string. if (d_chars && is_valid) { auto const d_str = strings_dv.element<string_view>(str_idx); auto const str_size = d_str.size_bytes(); if (str_size > 0) { auto const input_ptr = d_str.data(); auto const output_ptr = d_chars + d_offsets[str_idx] + repeat_idx * str_size; std::memcpy(output_ptr, input_ptr, str_size); } } } }; } // namespace std::unique_ptr<column> repeat_strings(strings_column_view const& input, size_type repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto const strings_count = input.size(); if (strings_count == 0) { return make_empty_column(type_id::STRING); } if (repeat_times <= 0) { // If the number of repetitions is not positive, each row of the output strings column will be // either an empty string (if the input row is not null), or a null (if the input row is null). return generate_empty_output(input, strings_count, stream, mr); } // If `repeat_times == 1`, just make a copy of the input. if (repeat_times == 1) { return std::make_unique<column>(input.parent(), stream, mr); } auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const fn = compute_size_and_repeat_fn{*strings_dv_ptr, repeat_times, input.has_nulls()}; auto [offsets_column, chars_column] = make_strings_children(fn, strings_count * repeat_times, strings_count, stream, mr); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), input.null_count(), cudf::detail::copy_bitmask(input.parent(), stream, mr)); } namespace { /** * @brief Functor to compute string sizes and repeat the input strings, each string is repeated by a * separate number of times. */ template <class Iterator> struct compute_size_and_repeat_separately_fn { column_device_view const strings_dv; column_device_view const repeat_times_dv; Iterator const repeat_times_iter; bool const strings_has_nulls; bool const rtimes_has_nulls; offset_type* d_offsets{nullptr}; // If d_chars == nullptr: only compute sizes of the output strings. // If d_chars != nullptr: only repeat strings. char* d_chars{nullptr}; __device__ int64_t operator()(size_type const idx) const noexcept { auto const string_is_valid = !strings_has_nulls || strings_dv.is_valid_nocheck(idx); auto const rtimes_is_valid = !rtimes_has_nulls || repeat_times_dv.is_valid_nocheck(idx); // Any null input (either string or repeat_times value) will result in a null output. auto const is_valid = string_is_valid && rtimes_is_valid; // When the input string is null, `repeat_times` and `string_size` are also set to 0. 
// This makes sure that if `repeat_times > 0` then we will always have a valid input string, // and if `repeat_times <= 0` we will never copy anything to the output. auto const repeat_times = is_valid ? repeat_times_iter[idx] : size_type{0}; auto const string_size = is_valid ? strings_dv.element<string_view>(idx).size_bytes() : size_type{0}; // The output_size is returned, and it needs to be an int64_t number to prevent overflow. auto const output_size = repeat_times > 0 ? static_cast<int64_t>(repeat_times) * static_cast<int64_t>(string_size) : int64_t{0}; if (!d_chars) { // If overflow happen, the stored value of output string size will be incorrect due to // downcasting. In such cases, the entire output string size array should be discarded. d_offsets[idx] = static_cast<offset_type>(output_size); } else if (repeat_times > 0 && string_size > 0) { auto const d_str = strings_dv.element<string_view>(idx); auto const input_ptr = d_str.data(); auto output_ptr = d_chars + d_offsets[idx]; for (size_type repeat_idx = 0; repeat_idx < repeat_times; ++repeat_idx) { output_ptr = copy_and_increment(output_ptr, input_ptr, string_size); } } // The output_size value may be used to sum up to detect overflow at the caller site. // The caller can detect overflow easily by checking `SUM(output_size) > INT_MAX`. return output_size; } }; /** * @brief Creates child offsets and chars columns by applying the template function that * can be used for computing the output size of each string as well as create the output. * * This function is similar to `strings::detail::make_strings_children`, except that it accepts an * optional input `std::optional<column_view>` that can contain the precomputed sizes of the output * strings. */ template <typename Func> auto make_strings_children(Func fn, size_type exec_size, size_type strings_count, std::optional<column_view> output_strings_sizes, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { auto offsets_column = make_numeric_column( data_type{type_id::INT32}, strings_count + 1, mask_state::UNALLOCATED, stream, mr); auto offsets_view = offsets_column->mutable_view(); auto d_offsets = offsets_view.template data<size_type>(); fn.d_offsets = d_offsets; // This may be called twice -- once for offsets and once for chars. auto for_each_fn = [exec_size, stream](Func& fn) { thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), exec_size, fn); }; if (!output_strings_sizes.has_value()) { // Compute the output sizes only if they are not given. for_each_fn(fn); // Compute the offsets values. thrust::exclusive_scan( rmm::exec_policy(stream), d_offsets, d_offsets + strings_count + 1, d_offsets); } else { // Compute the offsets values from the provided output string sizes. auto const string_sizes = output_strings_sizes.value(); CUDA_TRY(cudaMemsetAsync(d_offsets, 0, sizeof(offset_type), stream.value())); thrust::inclusive_scan(rmm::exec_policy(stream), string_sizes.template begin<size_type>(), string_sizes.template end<size_type>(), d_offsets + 1); } // Now build the chars column auto const bytes = cudf::detail::get_value<size_type>(offsets_view, strings_count, stream); auto chars_column = create_chars_child_column(bytes, stream, mr); // Execute the function fn again to fill the chars column. // Note that if the output chars column has zero size, the function fn should not be called to // avoid accidentally overwriting the offsets. 
if (bytes > 0) { fn.d_chars = chars_column->mutable_view().template data<char>(); for_each_fn(fn); } return std::make_pair(std::move(offsets_column), std::move(chars_column)); } } // namespace std::unique_ptr<column> repeat_strings(strings_column_view const& input, column_view const& repeat_times, std::optional<column_view> output_strings_sizes, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.size() == repeat_times.size(), "The input columns must have the same size."); CUDF_EXPECTS(cudf::is_index_type(repeat_times.type()), "repeat_strings expects an integer type for the `repeat_times` input column."); if (output_strings_sizes.has_value()) { auto const output_sizes = output_strings_sizes.value(); CUDF_EXPECTS(input.size() == output_sizes.size() && (!output_sizes.nullable() || !output_sizes.has_nulls()), "The given column of output string sizes is invalid."); } auto const strings_count = input.size(); if (strings_count == 0) { return make_empty_column(type_id::STRING); } auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const repeat_times_dv_ptr = column_device_view::create(repeat_times, stream); auto const strings_has_nulls = input.has_nulls(); auto const rtimes_has_nulls = repeat_times.has_nulls(); auto const repeat_times_iter = cudf::detail::indexalator_factory::make_input_iterator(repeat_times); auto const fn = compute_size_and_repeat_separately_fn<decltype(repeat_times_iter)>{ *strings_dv_ptr, *repeat_times_dv_ptr, repeat_times_iter, strings_has_nulls, rtimes_has_nulls}; auto [offsets_column, chars_column] = make_strings_children(fn, strings_count, strings_count, output_strings_sizes, stream, mr); // We generate new bitmask by AND of the input columns' bitmasks. // Note that if the input columns are nullable, the output column will also be nullable (which may // not have nulls). 
auto [null_mask, null_count] = cudf::detail::bitmask_and(table_view{{input.parent(), repeat_times}}, stream, mr); return make_strings_column(strings_count, std::move(offsets_column), std::move(chars_column), null_count, std::move(null_mask)); } std::pair<std::unique_ptr<column>, int64_t> repeat_strings_output_sizes( strings_column_view const& input, column_view const& repeat_times, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(input.size() == repeat_times.size(), "The input columns must have the same size."); CUDF_EXPECTS( cudf::is_index_type(repeat_times.type()), "repeat_strings_output_sizes expects an integer type for the `repeat_times` input column."); auto const strings_count = input.size(); if (strings_count == 0) { return std::make_pair(make_empty_column(type_to_id<size_type>()), int64_t{0}); } auto output_sizes = make_numeric_column( data_type{type_to_id<size_type>()}, strings_count, mask_state::UNALLOCATED, stream, mr); auto const strings_dv_ptr = column_device_view::create(input.parent(), stream); auto const repeat_times_dv_ptr = column_device_view::create(repeat_times, stream); auto const strings_has_nulls = input.has_nulls(); auto const rtimes_has_nulls = repeat_times.has_nulls(); auto const repeat_times_iter = cudf::detail::indexalator_factory::make_input_iterator(repeat_times); auto const fn = compute_size_and_repeat_separately_fn<decltype(repeat_times_iter)>{ *strings_dv_ptr, *repeat_times_dv_ptr, repeat_times_iter, strings_has_nulls, rtimes_has_nulls, output_sizes->mutable_view().template begin<size_type>()}; auto const total_bytes = thrust::transform_reduce(rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), fn, int64_t{0}, thrust::plus{}); return std::make_pair(std::move(output_sizes), total_bytes); } } // namespace detail std::unique_ptr<string_scalar> repeat_string(string_scalar const& input, size_type repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_string(input, repeat_times, rmm::cuda_stream_default, mr); } std::unique_ptr<column> repeat_strings(strings_column_view const& input, size_type repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings(input, repeat_times, rmm::cuda_stream_default, mr); } std::unique_ptr<column> repeat_strings(strings_column_view const& input, column_view const& repeat_times, std::optional<column_view> output_strings_sizes, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings( input, repeat_times, output_strings_sizes, rmm::cuda_stream_default, mr); } std::pair<std::unique_ptr<column>, int64_t> repeat_strings_output_sizes( strings_column_view const& input, column_view const& repeat_times, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat_strings_output_sizes(input, repeat_times, rmm::cuda_stream_default, mr); } } // namespace strings } // namespace cudf
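// --- Usage sketch (illustrative; not part of the original source above) ----
// Sketch of the overflow-safe path for per-row repeat counts implemented
// above: first compute the output sizes and their int64_t total, then feed
// the sizes column back in so the per-row sizes are not recomputed. The
// wrapper name `repeat_per_row` is an assumption for demonstration.
#include <cudf/strings/repeat_strings.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf/utilities/error.hpp>

#include <limits>

std::unique_ptr<cudf::column> repeat_per_row(cudf::strings_column_view const& input,
                                             cudf::column_view const& repeat_times)
{
  auto [sizes, total_bytes] = cudf::strings::repeat_strings_output_sizes(input, repeat_times);
  // As noted in the functor above, SUM(output_size) > INT_MAX means the chars
  // child column would overflow, so fail before building the output.
  CUDF_EXPECTS(total_bytes <= std::numeric_limits<cudf::size_type>::max(),
               "Size of the output strings column exceeds the maximum allowed.");
  return cudf::strings::repeat_strings(input, repeat_times, sizes->view());
}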
afcdd5335912da76d4dbfc43d8a26ca37d0bf934.hip
// !!! This is a file automatically generated by hipify!!!
#include "cuda-error.cuh"

#include <iostream>

constexpr bool FAILURE = true;
constexpr bool SUCCESS = false;

// Prints the error name and description to stderr; returns FAILURE on error.
bool _CudaErrorCheck(hipError_t const& error)
{
    if (error != hipSuccess)
    {
        std::cerr << hipGetErrorName(error) << "> " << hipGetErrorString(error) << '\n';
        return FAILURE;
    }
    return SUCCESS;
}
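// --- Usage sketch (illustrative; not part of the original source above) ----
// How _CudaErrorCheck might be wrapped at call sites in the HIP build. The
// CHECK macro and the `allocate_example` function are assumptions for
// demonstration; the real declaration lives in cuda-error.cuh.
#include <hip/hip_runtime.h>
#include <cstdlib>

#define CHECK(call) \
    do { if (_CudaErrorCheck(call)) return EXIT_FAILURE; } while (0)

int allocate_example()
{
    float* d_buf = nullptr;
    CHECK(hipMalloc(&d_buf, 1024 * sizeof(float)));  // logs name + message on failure
    CHECK(hipFree(d_buf));
    return EXIT_SUCCESS;
}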
afcdd5335912da76d4dbfc43d8a26ca37d0bf934.cu
#include "cuda-error.cuh" #include <iostream> constexpr bool FAILURE = 1; constexpr bool SUCCESS = 0; bool _CudaErrorCheck(cudaError_t const& error) { if (error != cudaSuccess) { std::cerr << cudaGetErrorName(error) << "> " << cudaGetErrorString(error) << '\n'; return FAILURE; } return SUCCESS; }
27c5de5bfdd93f53447527aca1ccab0b53536ba7.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <sstream> #include <vector> #include <NvInfer.h> #include <opencv2/opencv.hpp> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <opencv2/highgui/highgui.hpp> using namespace std; using namespace nvinfer1; class Logger : public ILogger { void log(Severity severity, const char * msg) override { if (severity != Severity::kINFO) cout << msg << endl; } } gLogger; void cvImageToTensor(const cv::Mat & image, float *tensor, nvinfer1::Dims dimensions) { const size_t channels = dimensions.d[0]; const size_t height = dimensions.d[1]; const size_t width = dimensions.d[2]; const size_t stridesCv[3] = { width * channels, channels, 1 }; const size_t strides[3] = { height * width, width, 1 }; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { for (int k = 0; k < channels; k++) { const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2]; const size_t offset = k * strides[0] + i * strides[1] + j * strides[2]; tensor[offset] = (float) image.data[offsetCv]; } } } } size_t numTensorElements(nvinfer1::Dims dimensions) { if (dimensions.nbDims == 0) return 0; size_t size = 1; for (int i = 0; i < dimensions.nbDims; i++) size *= dimensions.d[i]; return size; } float argmax(float *tensor, nvinfer1::Dims dimensions) { size_t max_ind=0; size_t i=0; size_t numel=numTensorElements(dimensions); for(; i<numel; i++) { cout<<i<<endl; cout<<*(tensor+i)<<endl; if( (*(tensor+i)) > (*(tensor+max_ind)) ) max_ind=i ; } return max_ind; } int main() { string imageFilename = "../dataset/S_test.jpg"; string planFilename="../TensorRT_Inference/Engine/engine.plan"; string inputnodeName="x"; string outputnodeName="logits/BiasAdd"; string classes_names="../classes_names.txt"; //getting the classes names vector<string> classes; ifstream ReadFile; ReadFile.open(classes_names); string str; if (ReadFile.is_open()) { while(!ReadFile.eof()) { getline(ReadFile,str); classes.push_back(str); } } classes.pop_back(); for(int i=0; i<classes.size(); i++) { cout<<i<<endl; cout<<classes[i]<<endl; } //Load the engine cout<<"Loading The TensorRT engine from plan file"<<endl; ifstream planFile(planFilename); if(!planFile.is_open()) {cout<<"Could not open plan file."<<endl; return 1;} stringstream planBuffer; planBuffer << planFile.rdbuf(); string plan=planBuffer.str(); //Create a runtime object to deserialize inference engine IRuntime* runtime=createInferRuntime(gLogger); ICudaEngine* engine= runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr); // Create space to store intermediate activation values IExecutionContext *context = engine->createExecutionContext(); //Get the input / output dimensions int inputBindingIndex, outputBindingIndex; inputBindingIndex = engine->getBindingIndex(inputnodeName.c_str()); outputBindingIndex = engine->getBindingIndex(outputnodeName.c_str()); if(inputBindingIndex < 0) {cout << "Invalid input name." << endl; return 1;} if(outputBindingIndex < 0) {cout << "invalid output name." << endl; return 1;} Dims inputDims, outputDims; inputDims = engine->getBindingDimensions(inputBindingIndex); outputDims = engine->getBindingDimensions(outputBindingIndex); int inputWidth, inputHeight; inputHeight = inputDims.d[1]; inputWidth = inputDims.d[2]; //Read image convert color and resize cout << "Preprocessing input ..." 
         << endl;
    cv::Mat image = cv::imread(imageFilename, CV_LOAD_IMAGE_COLOR);
    cv::namedWindow("Display window", CV_WINDOW_AUTOSIZE);
    if (image.data == NULL) { cout << "Could not read image from file." << endl; return 1; }
    //cv::cvtColor(image, image, cv::COLOR_RGB2BGR, 3);
    cv::resize(image, image, cv::Size(inputWidth, inputHeight)); //, cv::INTER_CUBIC);
    //image.convertTo(image, CV_32FC3);
    cv::imshow("Display window", image);

    // Convert from uint8+NHWC to float+NCHW
    float *inputDataHost, *outputDataHost;
    size_t numInput, numOutput;
    numInput = numTensorElements(inputDims);
    numOutput = numTensorElements(outputDims);
    inputDataHost = (float*) malloc(numInput * sizeof(float));
    outputDataHost = (float*) malloc(numOutput * sizeof(float));
    cvImageToTensor(image, inputDataHost, inputDims);

    // Transfer to device
    float *inputDataDevice, *outputDataDevice;
    hipMalloc(&inputDataDevice, numInput * sizeof(float));
    hipMalloc(&outputDataDevice, numOutput * sizeof(float));
    hipMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), hipMemcpyHostToDevice);

    void *bindings[2];
    bindings[inputBindingIndex] = (void*) inputDataDevice;
    bindings[outputBindingIndex] = (void*) outputDataDevice;

    // Execute engine
    cout << "Executing inference engine ..." << endl;
    const int kBatchSize = 1;
    context->execute(kBatchSize, bindings);

    // Transfer output back to host
    hipMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), hipMemcpyDeviceToHost);

    /* parse output */
    // vector<size_t> sortedIndices = argsort(outputDataHost, outputDims);
    // cout << "\nThe top-5 indices are: ";
    // for (int i = 0; i < 5; i++)
    //     cout << sortedIndices[i] << " ";

    // Read output
    cout << "The prediction is :" << classes[argmax(outputDataHost, outputDims)] << endl;

    // Clean up in reverse order of creation: context, then engine, then runtime.
    cv::waitKey(0);
    context->destroy();
    engine->destroy();
    runtime->destroy();
    free(inputDataHost);
    free(outputDataHost);
    hipFree(inputDataDevice);
    hipFree(outputDataDevice);
    return 0;
}
27c5de5bfdd93f53447527aca1ccab0b53536ba7.cu
#include <iostream> #include <fstream> #include <sstream> #include <vector> #include <NvInfer.h> #include <opencv2/opencv.hpp> #include <cuda_runtime_api.h> #include <cuda.h> #include <opencv2/highgui/highgui.hpp> using namespace std; using namespace nvinfer1; class Logger : public ILogger { void log(Severity severity, const char * msg) override { if (severity != Severity::kINFO) cout << msg << endl; } } gLogger; void cvImageToTensor(const cv::Mat & image, float *tensor, nvinfer1::Dims dimensions) { const size_t channels = dimensions.d[0]; const size_t height = dimensions.d[1]; const size_t width = dimensions.d[2]; const size_t stridesCv[3] = { width * channels, channels, 1 }; const size_t strides[3] = { height * width, width, 1 }; for (int i = 0; i < height; i++) { for (int j = 0; j < width; j++) { for (int k = 0; k < channels; k++) { const size_t offsetCv = i * stridesCv[0] + j * stridesCv[1] + k * stridesCv[2]; const size_t offset = k * strides[0] + i * strides[1] + j * strides[2]; tensor[offset] = (float) image.data[offsetCv]; } } } } size_t numTensorElements(nvinfer1::Dims dimensions) { if (dimensions.nbDims == 0) return 0; size_t size = 1; for (int i = 0; i < dimensions.nbDims; i++) size *= dimensions.d[i]; return size; } float argmax(float *tensor, nvinfer1::Dims dimensions) { size_t max_ind=0; size_t i=0; size_t numel=numTensorElements(dimensions); for(; i<numel; i++) { cout<<i<<endl; cout<<*(tensor+i)<<endl; if( (*(tensor+i)) > (*(tensor+max_ind)) ) max_ind=i ; } return max_ind; } int main() { string imageFilename = "../dataset/S_test.jpg"; string planFilename="../TensorRT_Inference/Engine/engine.plan"; string inputnodeName="x"; string outputnodeName="logits/BiasAdd"; string classes_names="../classes_names.txt"; //getting the classes names vector<string> classes; ifstream ReadFile; ReadFile.open(classes_names); string str; if (ReadFile.is_open()) { while(!ReadFile.eof()) { getline(ReadFile,str); classes.push_back(str); } } classes.pop_back(); for(int i=0; i<classes.size(); i++) { cout<<i<<endl; cout<<classes[i]<<endl; } //Load the engine cout<<"Loading The TensorRT engine from plan file"<<endl; ifstream planFile(planFilename); if(!planFile.is_open()) {cout<<"Could not open plan file."<<endl; return 1;} stringstream planBuffer; planBuffer << planFile.rdbuf(); string plan=planBuffer.str(); //Create a runtime object to deserialize inference engine IRuntime* runtime=createInferRuntime(gLogger); ICudaEngine* engine= runtime->deserializeCudaEngine((void*)plan.data(), plan.size(), nullptr); // Create space to store intermediate activation values IExecutionContext *context = engine->createExecutionContext(); //Get the input / output dimensions int inputBindingIndex, outputBindingIndex; inputBindingIndex = engine->getBindingIndex(inputnodeName.c_str()); outputBindingIndex = engine->getBindingIndex(outputnodeName.c_str()); if(inputBindingIndex < 0) {cout << "Invalid input name." << endl; return 1;} if(outputBindingIndex < 0) {cout << "invalid output name." << endl; return 1;} Dims inputDims, outputDims; inputDims = engine->getBindingDimensions(inputBindingIndex); outputDims = engine->getBindingDimensions(outputBindingIndex); int inputWidth, inputHeight; inputHeight = inputDims.d[1]; inputWidth = inputDims.d[2]; //Read image convert color and resize cout << "Preprocessing input ..." << endl; cv::Mat image = cv::imread(imageFilename,CV_LOAD_IMAGE_COLOR); cv::namedWindow( "Display window", CV_WINDOW_AUTOSIZE ); if(image.data == NULL ) { cout << "Could not read image from file." 
             << endl; return 1; }
    //cv::cvtColor(image, image, cv::COLOR_RGB2BGR, 3);
    cv::resize(image, image, cv::Size(inputWidth, inputHeight)); //, cv::INTER_CUBIC);
    //image.convertTo(image, CV_32FC3);
    cv::imshow("Display window", image);

    // Convert from uint8+NHWC to float+NCHW
    float *inputDataHost, *outputDataHost;
    size_t numInput, numOutput;
    numInput = numTensorElements(inputDims);
    numOutput = numTensorElements(outputDims);
    inputDataHost = (float*) malloc(numInput * sizeof(float));
    outputDataHost = (float*) malloc(numOutput * sizeof(float));
    cvImageToTensor(image, inputDataHost, inputDims);

    // Transfer to device
    float *inputDataDevice, *outputDataDevice;
    cudaMalloc(&inputDataDevice, numInput * sizeof(float));
    cudaMalloc(&outputDataDevice, numOutput * sizeof(float));
    cudaMemcpy(inputDataDevice, inputDataHost, numInput * sizeof(float), cudaMemcpyHostToDevice);

    void *bindings[2];
    bindings[inputBindingIndex] = (void*) inputDataDevice;
    bindings[outputBindingIndex] = (void*) outputDataDevice;

    // Execute engine
    cout << "Executing inference engine ..." << endl;
    const int kBatchSize = 1;
    context->execute(kBatchSize, bindings);

    // Transfer output back to host
    cudaMemcpy(outputDataHost, outputDataDevice, numOutput * sizeof(float), cudaMemcpyDeviceToHost);

    /* parse output */
    // vector<size_t> sortedIndices = argsort(outputDataHost, outputDims);
    // cout << "\nThe top-5 indices are: ";
    // for (int i = 0; i < 5; i++)
    //     cout << sortedIndices[i] << " ";

    // Read output
    cout << "The prediction is :" << classes[argmax(outputDataHost, outputDims)] << endl;

    // Clean up in reverse order of creation: context, then engine, then runtime.
    cv::waitKey(0);
    context->destroy();
    engine->destroy();
    runtime->destroy();
    free(inputDataHost);
    free(outputDataHost);
    cudaFree(inputDataDevice);
    cudaFree(outputDataDevice);
    return 0;
}
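// --- Usage sketch (illustrative; not part of the original source above) ----
// A standard-library alternative to the hand-rolled argmax above (which
// returns the index as a float and prints every element while scanning). The
// name `argmax_std` is an assumption; it expects the output tensor already
// copied back to host memory.
#include <algorithm>
#include <cstddef>

size_t argmax_std(const float* tensor, size_t numel)
{
    // Index of the largest logit; 0 for an empty tensor.
    return numel == 0 ? 0
                      : static_cast<size_t>(std::max_element(tensor, tensor + numel) - tensor);
}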
112c55b114b89280e4a11592914960a132cfe44c.hip
// !!! This is a file automatically generated by hipify!!!
// CUDA Device Query
#include <stdio.h>

// Print device properties
void printDevProp(hipDeviceProp_t devProp)
{
    printf("Name: %s\n", devProp.name);
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}

int main()
{
    // Number of CUDA devices
    int devCount;
    hipGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    // Iterate through devices
    for (int i = 0; i < devCount; ++i)
    {
        hipSetDevice(i);

        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        hipDeviceProp_t devProp;
        hipGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);
    return 0;
}
112c55b114b89280e4a11592914960a132cfe44c.cu
// CUDA Device Query
#include <stdio.h>

// Print device properties
void printDevProp(cudaDeviceProp devProp)
{
    printf("Name: %s\n", devProp.name);
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Total global memory: %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Texture alignment: %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}

int main()
{
    // Number of CUDA devices
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    // Iterate through devices
    for (int i = 0; i < devCount; ++i)
    {
        cudaSetDevice(i);

        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);
    return 0;
}
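// --- Usage sketch (illustrative; not part of the original source above) ----
// A derived figure the query above does not print: peak theoretical memory
// bandwidth from memoryClockRate (reported in kHz) and memoryBusWidth
// (reported in bits). The factor of 2 accounts for DDR memory transferring on
// both clock edges. The function name `peakBandwidthGBs` is an assumption.
double peakBandwidthGBs(const cudaDeviceProp& devProp)
{
    // kHz * bytes-per-cycle * 2 edges, scaled to GB/s.
    return 2.0 * devProp.memoryClockRate * (devProp.memoryBusWidth / 8.0) / 1.0e6;
}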
033d5f8b49656833238aab0355c0f08a553d5487.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software.  Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

/**
 * Collatz verification.
 *
 * Derived from the CUDA vector-addition sample: each thread checks that the
 * Collatz trajectory of an odd number below maxNumber eventually drops below
 * its starting value.
 */

#include <stdio.h>
#include <stdlib.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <hip/hip_runtime.h>

/**
 * CUDA Kernel Device code
 *
 * Each thread iterates the Collatz map (3n+1 for odd n, n/2 for even n) on a
 * strided set of odd numbers in (2, maxNumber), looping until the trajectory
 * falls below the starting number.
 */
__global__ void verifyCollatz(int64_t maxNumber)
{
    int timesToRunGrid = maxNumber / (blockDim.x * gridDim.x) + 1;
    int64_t number = 0;
    int64_t i = 0;
    for (int64_t gridRunNumber = 0; gridRunNumber < timesToRunGrid; ++gridRunNumber)
    {
        // odd numbers only
        number = 2 * (blockDim.x * gridDim.x * gridRunNumber + blockDim.x * blockIdx.x + threadIdx.x) + 1;
        i = number;
        if (number > 2 && number < maxNumber)
        {
            while (i >= number)
            {
                if (i & 0x1)
                { /* odd case */
                    i = i * 3 + 1;
                }
                else
                { /* even case */
                    i = i >> 1;
                }
            }
        }
    }
}

/**
 * Host main routine
 */
int main()
{
    // Error code to check return values for CUDA calls
    hipError_t err = hipSuccess;

    int64_t maxNumber = 256ll * 256ll * 256ll * 256ll;

    // Launch the Collatz verification CUDA kernel
    int threadsPerBlock = 256;
    int blocksPerGrid = 256;
    // use CUDA builtin heuristics to get max performance
    hipOccupancyMaxPotentialBlockSize(
        &blocksPerGrid, &threadsPerBlock, (void*) verifyCollatz, 0, 0);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    hipLaunchKernelGGL(( verifyCollatz), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, maxNumber);
    err = hipGetLastError();
    hipDeviceSynchronize();

    if (err != hipSuccess)
    {
        fprintf(stderr, "Failed to launch collatz kernel (error code %s)!\n", hipGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Done\n");
    return 0;
}
033d5f8b49656833238aab0355c0f08a553d5487.cu
/**
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software.  Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */

/**
 * Collatz verification.
 *
 * Derived from the CUDA vector-addition sample: each thread checks that the
 * Collatz trajectory of an odd number below maxNumber eventually drops below
 * its starting value.
 */

#include <stdio.h>
#include <stdlib.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

/**
 * CUDA Kernel Device code
 *
 * Each thread iterates the Collatz map (3n+1 for odd n, n/2 for even n) on a
 * strided set of odd numbers in (2, maxNumber), looping until the trajectory
 * falls below the starting number.
 */
__global__ void verifyCollatz(int64_t maxNumber)
{
    int timesToRunGrid = maxNumber / (blockDim.x * gridDim.x) + 1;
    int64_t number = 0;
    int64_t i = 0;
    for (int64_t gridRunNumber = 0; gridRunNumber < timesToRunGrid; ++gridRunNumber)
    {
        // odd numbers only
        number = 2 * (blockDim.x * gridDim.x * gridRunNumber + blockDim.x * blockIdx.x + threadIdx.x) + 1;
        i = number;
        if (number > 2 && number < maxNumber)
        {
            while (i >= number)
            {
                if (i & 0x1)
                { /* odd case */
                    i = i * 3 + 1;
                }
                else
                { /* even case */
                    i = i >> 1;
                }
            }
        }
    }
}

/**
 * Host main routine
 */
int main()
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    int64_t maxNumber = 256ll * 256ll * 256ll * 256ll;

    // Launch the Collatz verification CUDA kernel
    int threadsPerBlock = 256;
    int blocksPerGrid = 256;
    // use CUDA builtin heuristics to get max performance
    cudaOccupancyMaxPotentialBlockSize(
        &blocksPerGrid, &threadsPerBlock, (void*) verifyCollatz, 0, 0);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    verifyCollatz<<<blocksPerGrid, threadsPerBlock>>>(maxNumber);
    err = cudaGetLastError();
    cudaDeviceSynchronize();

    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch collatz kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    printf("Done\n");
    return 0;
}
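// --- Usage sketch (illustrative; not part of the original source above) ----
// A host-side reference of the per-number check the kernel performs, usable
// for spot-verifying a few values on the CPU. The name `collatzDescends` is
// an assumption. Like the kernel, it assumes the trajectory eventually drops
// below its starting value and that 3*i + 1 does not overflow int64_t.
#include <stdint.h>

void collatzDescends(int64_t number)
{
    if (number <= 2) return;  // the kernel skips these values as well
    int64_t i = number;
    while (i >= number)       // loop until the trajectory drops below its start
        i = (i & 0x1) ? 3 * i + 1 : i >> 1;
}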
c034f8095780baf12f71eedd1db9d87b78c5f9c0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "box2d1r-512-16-512_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_16(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; double __reg_15_0; double __reg_15_1; double __reg_15_2; double __reg_16_0; double __reg_16_1; double __reg_16_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl 
- (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16); const AN5D_TYPE __storeValid = __writeValid16; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC15(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid15) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC16(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid16) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, 
__reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_0); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, 
__reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(1, __reg_16_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(2, __reg_16_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(3, __reg_16_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(4, __reg_16_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(5, __reg_16_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(6, __reg_16_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, 
__reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(7, __reg_16_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(8, __reg_16_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(9, __reg_16_0); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(10, __reg_16_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); 
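// Editor's comment (hedged): this appears to be machine-generated, temporally
// blocked stencil code fusing 16 time steps. Each __CALCk stage keeps three
// rotating registers (__reg_k_0/1/2) holding its last three outputs; every
// __LOAD of one input row pushes a value through all 16 stages, and __STORE
// emits a finished output row once the pipeline is primed (rows 0..32 here).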
__CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(11, __reg_16_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(12, __reg_16_0); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(13, __reg_16_1); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(14, __reg_16_2); __LOAD(__reg_0, 31); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, 
__reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(15, __reg_16_0); __LOAD(__reg_0, 32); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(16, __reg_16_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, 
__reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, 
__reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __LOAD(__reg_0, 31); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __LOAD(__reg_0, 32); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(16, __reg_16_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, 
__reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, 
__reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, 
__reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); 
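// Hedged reading of this branch: with (__h + 1) rows left the input is
// exhausted, so the pipeline is drained stage by stage. The repeated first
// two arguments in calls such as __CALC14(__reg_14_1, __reg_14_1, ...) look
// like boundary replication while the buffered rows are flushed via __STORE.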
__CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_14_0 = __reg_13_0; __CALC15(__reg_15_1, __reg_15_1, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_15_0 = __reg_14_0; __CALC16(__reg_16_1, __reg_16_1, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_2_1 = 
__reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, 
__reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_14_1 = __reg_13_1; __CALC15(__reg_15_2, __reg_15_2, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); __reg_15_1 = __reg_14_1; __CALC16(__reg_16_2, __reg_16_2, __reg_16_0, __reg_15_1); __STORE(__h + 0, __reg_16_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); 
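// Same drain pattern, (__h + 3) tail case: three more rows (__h .. __h + 2)
// are still __LOADed to top up the pipeline before the stage-by-stage flush.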
__CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, 
__reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); 
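/* Pipeline drain: with no rows left to load, copies of the form
   "__reg_s_x = __reg_(s-1)_x" promote the last value of stage s-1 into stage s,
   so the deeper stages can finish and __STORE their remaining output rows. */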
__CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); __reg_14_2 = __reg_13_2; __CALC15(__reg_15_0, __reg_15_0, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h + 0, __reg_16_0); __reg_15_2 = __reg_14_2; __CALC16(__reg_16_0, __reg_16_0, __reg_16_1, __reg_15_2); __STORE(__h + 1, __reg_16_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, 
__reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++;
}
}

/* kernel0_15: the same generated stencil sweep as the preceding kernels, but
   fusing 15 time steps per launch (__side0Len = 15) with a halo of 1 in each
   spatial dimension. Preprocessor directives below were re-broken onto their
   own lines; token content is unchanged. */
__global__ void kernel0_15(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 482;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2;
double __reg_2_0; double __reg_2_1; double __reg_2_2;
double __reg_3_0; double __reg_3_1; double __reg_3_2;
double __reg_4_0; double __reg_4_1; double __reg_4_2;
double __reg_5_0; double __reg_5_1; double __reg_5_2;
double __reg_6_0; double __reg_6_1; double __reg_6_2;
double __reg_7_0; double __reg_7_1; double __reg_7_2;
double __reg_8_0; double __reg_8_1; double __reg_8_2;
double __reg_9_0; double __reg_9_1; double __reg_9_2;
double __reg_10_0; double __reg_10_1; double __reg_10_2;
double __reg_11_0; double __reg_11_1; double __reg_11_2;
double __reg_12_0; double __reg_12_1; double __reg_12_2;
double __reg_13_0; double __reg_13_1; double __reg_13_2;
double __reg_14_0; double __reg_14_1; double __reg_14_2;
double __reg_15_0; double __reg_15_1; double __reg_15_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14);
const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15);
const AN5D_TYPE __storeValid = __writeValid15;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
/* Reading __CALCEXPR_0/1/2 together, one fused time step appears to apply a
   9-point (3x3) box stencil; for an interior point,
     A[t+1][c1][c2] = 0.09371*A[t][c1-1][c2-1] + 0.09374*A[t][c1-1][c2] + 0.09376*A[t][c1-1][c2+1]
                    + 0.09372*A[t][c1  ][c2-1] + 0.25001*A[t][c1  ][c2] + 0.09377*A[t][c1  ][c2+1]
                    + 0.09373*A[t][c1+1][c2-1] + 0.09375*A[t][c1+1][c2] + 0.09378*A[t][c1+1][c2+1]
   with __CALCEXPR_0 assigning the first row's contribution and
   __CALCEXPR_1/__CALCEXPR_2 accumulating the other two as rows stream in. */
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC15(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid15) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
/* Prologue: fill the 15-stage pipeline with rows 0..30 of this c1 tile. */
if (__c1Id == 0)
{
__LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1);
__LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1);
__LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1);
__LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1);
__LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1,
__reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, 
__reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(1, __reg_15_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(2, __reg_15_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(3, __reg_15_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); 
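/* First c1 tile (__c1Id == 0): loads of rows 0..30 fill the 15-stage pipeline;
   from row 16 onward every __LOAD lets the last stage (__CALC15) complete one
   row, hence the interleaved __STORE(1) ... __STORE(15). */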
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(4, __reg_15_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(5, __reg_15_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(6, __reg_15_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(7, __reg_15_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(8, __reg_15_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(9, __reg_15_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(10, __reg_15_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(11, __reg_15_2); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); 
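/* Register naming appears to be __reg_<stage>_<phase>: <stage> is the fused time
   step (1..15) and <phase> in {0,1,2} indexes a three-register rotating buffer;
   the cyclically permuted arguments of successive __CALCn calls implement the
   rotation without any data movement. */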
__CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(12, __reg_15_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(13, __reg_15_1); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(14, __reg_15_2); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(15, __reg_15_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
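/* Interior c1 tiles (this else branch): the same 31-row fill is performed, but
   only __STORE(15, ...) at its end commits output; the earlier rows overlap the
   preceding tile and are recomputed there, which appears to be the redundancy
   of the overlapped tiling scheme that avoids inter-block communication. */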
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, 
__reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, 
__reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(15, __reg_15_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - 
__side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, 
__reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, 
__reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_14_1 = __reg_13_1; __CALC15(__reg_15_2, __reg_15_2, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); 
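// __h+2 residue case: two input rows remain. The first row continues through the deeper CALC stages below; the drain that follows forwards each stage's boundary register (__reg_k_2 = __reg_{k-1}_2) and flushes the in-flight results, one stored row per stage.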
__CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, 
__reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, 
__reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); __reg_14_2 = __reg_13_2; __CALC15(__reg_15_0, __reg_15_0, __reg_15_1, __reg_14_2); __STORE(__h + 0, __reg_15_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, 
__reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, 
__reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h + 0, __reg_15_1); __reg_14_0 = __reg_13_0; __CALC15(__reg_15_1, __reg_15_1, __reg_15_2, __reg_14_0); __STORE(__h + 1, __reg_15_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, 
__reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
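// Remainder of the unroll-by-3 steady-state loop: each leftover row takes one LOAD -> CALC1..CALC15 -> STORE pass, returning early once __h reaches __side1LenOl.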
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; } } __global__ void kernel0_14(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double 
__reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __storeValid = __writeValid14; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, 
__reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(1, 
__reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(2, __reg_14_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(3, __reg_14_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(4, __reg_14_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(5, __reg_14_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, 
__reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(6, __reg_14_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(7, __reg_14_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(8, __reg_14_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(9, __reg_14_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); 
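// First tile (__c1Id == 0): from row 15 onward every loaded row retires one output, i.e. __STORE(h - 14, ...) trails __LOAD(__reg_0, h) by the 14-stage pipeline depth.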
__CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(10, __reg_14_1); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(11, __reg_14_2); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(12, __reg_14_0); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(13, __reg_14_1); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(14, __reg_14_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); 
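// Interior tiles (__c1Id != 0): the warm-up streams rows 0, 1, 2, ... and activates one additional CALC stage every second row (each stage consumes a 3-row window), filling the 14-stage register pipeline before the main loop; unlike the first tile, no stage is seeded with the replicated boundary row.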
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, 
__reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, 
__reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, 
__reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(14, __reg_14_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, 
__reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, 
__reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); 
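// Tail flush for the last block along c1: with no further rows to load, each
// stage k is retired in turn by promoting its boundary register
// (__reg_k_2 = __reg_(k-1)_2), and the deeper stages emit one more output
// row per promotion step.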
__STORE(__h - 4, __reg_14_1); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, 
__reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); 
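// This is one of three remainder cases (__h + 1, + 2, or + 3 rows left): the
// steady-state loop above advances __h by 3 per iteration, so up to three
// trailing rows must be loaded and flushed separately here.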
__CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 4, __reg_14_1); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __STORE(__h + 0, __reg_14_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); 
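// The last loaded row has just been copied into the stage-1 boundary
// register (__reg_1_1 = __reg_0); from here the usual stage-by-stage
// promotion drains the remaining pipeline.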
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, 
__reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 4, __reg_14_1); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h + 0, __reg_14_2); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __STORE(__h + 1, __reg_14_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); 
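// Steady state for interior blocks: the loop body is unrolled three times so
// the three-way register rotation returns to its starting phase; each
// unrolled step loads one row at __h and stores the finished row 14
// positions behind it (__STORE(__h - 14, ...)).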
__CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, 
__reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++; } } __global__ void kernel0_13(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 13; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 486; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && 
__local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __storeValid = __writeValid13; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, 
__reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, 
__reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(1, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(2, __reg_13_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(3, __reg_13_0); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(4, __reg_13_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(5, __reg_13_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(6, __reg_13_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(7, __reg_13_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, 
__reg_12_0); __STORE(8, __reg_13_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(9, __reg_13_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(10, __reg_13_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(11, __reg_13_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(12, __reg_13_0); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); 
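/* (top-boundary warm-up, final row: after row 26 clears the remaining stages, __STORE(13, ...) below emits output row 13 and the 13-stage pipeline is fully primed) */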
__CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(13, __reg_13_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, 
__reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); 
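/* (interior-block warm-up: each newly loaded row enters __CALC1 and pushes earlier rows one stage deeper; no __STORE is issued until all 13 stages are primed at row 26) */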
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(13, __reg_13_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); 
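/* (drain for the bottom-boundary block, one row left: __CALC1's repeated first argument above discards the partial sum of a row that will never be completed, and the raw boundary value is forwarded through the later stages via the __reg_k_0 copies below while the last 13 output rows are stored; boundary rows are never updated by the stencil, so forwarding the loaded value is exact) */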
__CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, 
__reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); 
__CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __STORE(__h + 0, __reg_13_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); 
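/* (drain variant with three rows left: the steady-state loop advances three rows per trip, so the generator emits one specialized epilogue for each possible remainder) */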
__CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_7_2 = 
__reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h + 0, __reg_13_0); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __STORE(__h + 1, __reg_13_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); 
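/* (interior blocks: this steady-state loop streams one row per unrolled step and stores row __h - 13; the __DB_SWITCH() + __syncthreads() at the end of each three-row group appears to restore the shared-memory double-buffer parity before the next group) */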
__CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++; __DB_SWITCH(); __syncthreads(); }
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++;
} }

/* kernel0_12: same 3x3 stencil as kernel0_13 above, but fusing 12 time steps per sweep (__side0Len = 12), i.e. pipeline stages __CALC1..__CALC12 */
__global__ void kernel0_12(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2;
__shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) { __LOAD(__reg_0, 0); 
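/* Top-boundary block (__c1Id == 0): row 0 is a fixed boundary row that the
   stencil never updates, so the single value loaded above is valid at every
   fused time step and seeds all 12 pipeline stages at once (each __CALCk in
   the chain below consumes __reg_0 directly).
   For reference, the 3x3 stencil realized by the __CALCEXPR_{0,1,2} macros
   above is equivalent to the following naive one-step kernel. This is a
   readability sketch only, not part of the generated code: the kernel name
   and launch geometry are assumptions, while the coefficients and the
   two-plane time buffer match __LOAD/__DEST above.

   __global__ void j2d9pt_step_naive(double *A, int dimsize, int c0)
   {
     int c1 = blockIdx.y * blockDim.y + threadIdx.y;  // row
     int c2 = blockIdx.x * blockDim.x + threadIdx.x;  // column
     if (c1 < 1 || c1 >= dimsize - 1 || c2 < 1 || c2 >= dimsize - 1) return;
     const double *in  = A + (c0 % 2) * dimsize * dimsize;        // plane read at step c0
     double       *out = A + ((c0 + 1) % 2) * dimsize * dimsize;  // plane written at step c0 + 1
     out[c1 * dimsize + c2] =
         0.09371f * in[(c1 - 1) * dimsize + (c2 - 1)] + 0.09374f * in[(c1 - 1) * dimsize + c2] + 0.09376f * in[(c1 - 1) * dimsize + (c2 + 1)]
       + 0.09372f * in[c1 * dimsize + (c2 - 1)]       + 0.25001f * in[c1 * dimsize + c2]       + 0.09377f * in[c1 * dimsize + (c2 + 1)]
       + 0.09373f * in[(c1 + 1) * dimsize + (c2 - 1)] + 0.09375f * in[(c1 + 1) * dimsize + c2] + 0.09378f * in[(c1 + 1) * dimsize + (c2 + 1)];
   }
*/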
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, 
__reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(1, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(2, __reg_12_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(3, __reg_12_0); __LOAD(__reg_0, 16); 
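/* (kernel0_12 pipeline now full: each further row loaded in this prologue retires one output row; the chain for row 16 below ends in __STORE(4, ...)) */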
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(4, __reg_12_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(5, __reg_12_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(6, __reg_12_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(7, __reg_12_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(8, __reg_12_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, 
__reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(9, __reg_12_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(10, __reg_12_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(11, __reg_12_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(12, __reg_12_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); 
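// Interior-block prologue (else branch): no boundary clamping is needed,
// so rows 0..24 are streamed in unmodified to prime all 12 pipeline
// stages; the only prologue __STORE lands at row offset 12, right after
// __LOAD(__reg_0, 24).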
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, 
__reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, 
__reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(12, __reg_12_0); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, 
__reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, __reg_12_2); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, 
__reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, 
__reg_12_2); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __STORE(__h + 0, __reg_12_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, 
__reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, __reg_12_2); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h + 0, __reg_12_1); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __STORE(__h + 1, __reg_12_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); 
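// The if/else-if chain above drains the pipeline for the bottom boundary
// block (__c1Id == __side1Num - 1), clamping each stage via the
// __reg_k_1 pass-through. This steady-state loop handles interior
// blocks: it is unrolled 3x to match the period-3 register rotation,
// each step loading row __h and storing row __h - 12, with the
// early-return tails below covering the final 0..2 leftover rows.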
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } } __global__ void kernel0_11(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const 
AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __storeValid = __writeValid11; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); 
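// kernel0_11 applies the same streaming scheme with 11 fused time steps
// (__side0Len = 11, __storeValid = __writeValid11). In the top block
// (__c1Id == 0) the first loaded row feeds every __CALCk stage directly
// with __reg_0, clamping the stencil at the domain boundary while the
// pipeline fills.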
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(1, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, 
__reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(2, __reg_11_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(3, __reg_11_0); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(4, __reg_11_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(5, __reg_11_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(6, __reg_11_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, 
__reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(7, __reg_11_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(8, __reg_11_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(9, __reg_11_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(10, __reg_11_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(11, __reg_11_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, 
__reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); 
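/*
 * Annotation (added for readability; the surrounding code is unchanged
 * AN5D-generated output): this region is the pipeline-fill prologue of the
 * 11-step fused kernel for interior tiles (__c1Id != 0). Each __CALCk
 * applies one time step of this kernel's 3x3 stencil (__CALCEXPR_0/1/2);
 * __reg_k_0/1/2 form a rotating three-row window for stage k, and the
 * cyclic permutation of arguments between consecutive calls advances that
 * window one row along c1. Rows 0..22 are consumed before the first fully
 * updated row (row 11) is written back by __STORE.
 */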
__CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(11, __reg_11_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, 
__reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, 
__reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); 
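/*
 * Annotation (added): bottom-boundary drain of the 11-stage pipeline.
 * No further rows can be loaded here, so the remaining in-flight output
 * rows are retired one per step: a plain register copy (for example
 * __reg_2_0 = __reg_1_0) stands in for the missing halo row, each
 * successive step runs one stage fewer, and __STORE counts down through
 * the still-pending rows.
 */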
__CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __STORE(__h + 0, __reg_11_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); 
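/*
 * Annotation (added): the surrounding `else if (__h + 1/2/3 == ...)` cases
 * select the drain variant that matches how many rows of this tile remain
 * unprocessed when the 3-way unrolled steady-state loop exits; the
 * generator emits one specialized epilogue per possible remainder.
 */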
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h + 0, __reg_11_2); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __STORE(__h + 1, __reg_11_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, 
__reg_11_0); __h++;
__LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++;
}
}

__global__ void kernel0_10(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2;
double __reg_2_0; double __reg_2_1; double __reg_2_2;
double __reg_3_0; double __reg_3_1; double __reg_3_2;
double __reg_4_0; double __reg_4_1; double __reg_4_2;
double __reg_5_0; double __reg_5_1; double __reg_5_2;
double __reg_6_0; double __reg_6_1; double __reg_6_2;
double __reg_7_0; double __reg_7_1; double __reg_7_2;
double __reg_8_0; double __reg_8_1; double __reg_8_2;
double __reg_9_0; double __reg_9_1; double __reg_9_2;
double __reg_10_0; double __reg_10_1; double __reg_10_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __storeValid = __writeValid10;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0,
__reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, 
__reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(1, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(2, __reg_10_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(3, __reg_10_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(4, __reg_10_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(5, __reg_10_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(6, __reg_10_0); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, 
__reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(7, __reg_10_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(8, __reg_10_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(9, __reg_10_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(10, __reg_10_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
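/*
 * Annotation (added): kernel0_10 repeats the same scheme with a 10-stage
 * pipeline (__side0Len = 10; results are stored at __h - 10). As a reading
 * aid only (the generated macros above are authoritative), one
 * __CALCk(out0, out1, out2, r) call expands, via __CALCSETUP and
 * __CALCEXPR, to roughly:
 *
 *   __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; // flip double buffer
 *   __a_sb[__tid] = r;      // publish this thread's element of the new row
 *   __syncthreads();
 *   if (__writeValidk) {    // threads too close to the tile edge skip step k
 *     out0  = 0.09371f*sb(-1) + 0.09374f*r + 0.09376f*sb(+1);  // open a new output row
 *     out1 += 0.09372f*sb(-1) + 0.25001f*r + 0.09377f*sb(+1);  // add the centre-row terms
 *     out2 += 0.09373f*sb(-1) + 0.09375f*r + 0.09378f*sb(+1);  // complete the oldest row
 *   } else out1 = r;        // otherwise forward the input row unchanged
 *
 * where sb(i) stands for __sbref_wrap(__a_sb, (int)__tid + i). The
 * completed accumulator (out2 here) is the value the next stage consumes.
 */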
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(10, __reg_10_1); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, 
__reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, 
__reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __STORE(__h + 0, __reg_10_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, 
__reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h + 0, __reg_10_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __STORE(__h + 1, __reg_10_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double 
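/* kernel0_9: AN5D-style time-tiled stencil kernel (cf. the AN5D_TYPE macro). One
   launch advances __side0Len = 9 fused time steps of a Jacobi-style 3x3 update:
   each thread streams the c1 dimension through a 9-deep software register pipeline
   (__reg_1_* .. __reg_9_*, three rotating registers per stage) while c2 neighbours
   come from the double-buffered shared array __a_sb; only lanes inside all nine
   shrinking halos (__writeValid9 below) are allowed to store. */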
__reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 
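/* First c1 tile (__c1Id == 0): every pipeline stage is seeded from the clamped
   boundary row 0, then rows 0..18 are consumed before output rows 1..9 are
   emitted via __STORE. */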
5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(1, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(2, __reg_9_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(3, __reg_9_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, 
__reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(4, __reg_9_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(5, __reg_9_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(6, __reg_9_0); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(7, __reg_9_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(8, __reg_9_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(9, __reg_9_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, 
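/* Interior c1 tiles (else branch): rows 0..18 overlap the neighbouring tiles
   (__OlLen1 = 9 halo rows per side), so this prologue emits only row 9, the
   first row whose 9-step update is fully valid for this tile. */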
__reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, 
__reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(9, __reg_9_0); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, 
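/* Last c1 tile: the steady-state loop stores row __h - 9 each iteration and is
   unrolled 3x so the three-register rotation needs no extra copies; the
   __h + 1/2/3 branches that follow drain the pipeline against the upper edge. */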
__reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, 
__reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __STORE(__h + 0, __reg_9_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, 
__reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h + 0, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __STORE(__h + 1, __reg_9_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; } } __global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
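/* kernel0_8: same pipeline structure specialised for __side0Len = 8 fused time
   steps: 8 register stages, stores gated by __writeValid8, and the c2 tile
   widened to 496 so the overlapped block width stays at 512 threads. */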
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 
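/* First c1 tile: prime the 8 stages from the clamped row 0, consuming rows
   0..16 to emit output rows 1..8. */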
3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(1, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(2, __reg_8_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(3, __reg_8_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, 
__reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(4, __reg_8_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(5, __reg_8_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(6, __reg_8_0); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(7, __reg_8_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(8, __reg_8_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(8, __reg_8_2); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); 
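/* Last c1 tile: stream one row per iteration (store row __h - 8); the epilogue
   branches flush the 8-stage pipeline at the upper boundary. */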
__CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 8, __reg_8_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 8, __reg_8_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); } else if (__h + 2 == __c1Len - __side1Len * 
__c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __STORE(__h + 0, __reg_8_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
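// NOTE (editorial, inferred): the "__h + 1/2/3 == ..." branches handle the 1-3 rows left
// over when the tile height is not a multiple of the 3-way unrolled loop, reusing the last
// loaded row (e.g. __reg_1_2 = __reg_0) to drain the remaining pipeline stages at the
// tile's bottom edge.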
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h + 0, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __STORE(__h + 1, __reg_8_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); 
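// NOTE (editorial, inferred): interior tiles (every tile but the last along c1) instead
// iterate up to __side1LenOl, finishing with up to three single steps, each guarded by an
// early "if (__h == __side1LenOl) return;".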
__CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 8, __reg_8_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 8, __reg_8_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 8, __reg_8_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 8, __reg_8_2); __h++; } } __global__ void kernel0_7(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double 
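// NOTE (editorial, inferred): one rotating 3-row register window __reg_k_0/1/2 per fused
// timestep k (halo1 == 1), plus the shared load register __reg_0.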
__reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(1, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(2, __reg_7_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(3, __reg_7_0); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(4, __reg_7_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(5, __reg_7_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(6, __reg_7_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(7, __reg_7_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); 
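// NOTE (editorial, inferred): this else-branch runs on non-top tiles (__c1Id != 0); it
// primes the pipeline with rows 0..14 and issues its first store only once all 7 stages
// hold valid data, since the earlier output rows belong to the neighbouring tile.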
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(7, __reg_7_1); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; 
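// NOTE (editorial, inferred): steady state -- one row in, one row out, with the store
// trailing the load by __side0Len (= 7 here) rows.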
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, __reg_7_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); 
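// NOTE (editorial, inferred): pipeline drain for the tile's bottom edge -- the final input
// row is carried down through the remaining stages so rows __h-7 .. __h-1 still get stored.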
__CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __STORE(__h + 0, __reg_7_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); 
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h + 0, __reg_7_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __STORE(__h + 1, __reg_7_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, __reg_7_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, 
__reg_7_1); __h++; } } __global__ void kernel0_6(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); 
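// NOTE (editorial, inferred): kernel0_6 fuses 6 timesteps (__side0Len == 6, __storeValid ==
// __writeValid6); correspondingly __side2Len = 500 = 512 - 2*6, since each fused step
// consumes one halo column on each side of the tile.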
__LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(1, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(2, __reg_6_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(3, __reg_6_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(4, __reg_6_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(5, __reg_6_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(6, __reg_6_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); 
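// NOTE (editorial, inferred): each __CALCn first publishes the row to the double-buffered
// shared line buffer (__CALCSETUP -> __DB_SWITCH + __syncthreads), then, where
// __writeValidN holds, accumulates this row's 3-point contributions into three partial
// output rows (out0 assigned, out1/out2 accumulated) -- a 3x3 stencil split by rows;
// otherwise the raw value passes through unchanged (out1 = reg).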
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(6, __reg_6_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); 
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __STORE(__h + 0, __reg_6_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, 
__reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h + 0, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __STORE(__h + 1, __reg_6_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
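// NOTE (editorial, inferred): remainder handling for interior tiles -- after the 3-way
// unrolled loop, up to three guarded single iterations run before the early return at
// __h == __side1LenOl.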
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define 
__CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); 
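/* Pipeline fill for the first tile along c1 (__c1Id == 0): rows are loaded
   one by one and pushed through the five per-time-step stages; stores begin
   only once every stage holds valid data. */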
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(1, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(2, __reg_5_2); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(3, __reg_5_0); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(4, __reg_5_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(5, __reg_5_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(5, __reg_5_2); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, 
__reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __STORE(__h + 0, __reg_5_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h + 0, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __STORE(__h + 1, __reg_5_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); 
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define 
__CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, 
__reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(1, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(2, __reg_4_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(3, __reg_4_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(4, __reg_4_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(4, __reg_4_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __STORE(__h + 0, __reg_4_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h + 0, __reg_4_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __STORE(__h + 1, __reg_4_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; 
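/* Steady state: the loop body is unrolled 3x so the (__reg_*_0, __reg_*_1,
   __reg_*_2) triples rotate purely by register renaming; each unrolled step
   loads one input row, advances it through all stages, and stores one
   finished output row. */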
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) 
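/* Stencil math: __CALCEXPR_0/1/2 split the 3x3 box stencil by rows.  Each
   input row (left/centre/right values fetched via __SBREF/__REGREF from the
   double-buffered shared-memory row) contributes one coefficient row
   (0.09371f..0.09378f, centre 0.25001f) to each of three consecutive output
   rows, accumulated in the rotating register triples.  Time is
   double-buffered inside A itself: step c0 reads plane (c0 % 2) and writes
   plane ((c0 + 1) % 2), as encoded by __LOAD and __DEST. */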
#define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(1, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(2, __reg_3_2); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); 
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(3, __reg_3_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(3, __reg_3_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __STORE(__h + 0, __reg_3_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __LOAD(__reg_0, __h + 1); 
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h + 0, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __STORE(__h + 1, __reg_3_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = 
__a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(1, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(2, __reg_2_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(2, __reg_2_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __STORE(__h + 0, __reg_2_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h + 0, __reg_2_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __STORE(__h + 1, __reg_2_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = 
A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(1, __reg_1_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(1, __reg_1_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); } else if (__h + 2 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; } }
c034f8095780baf12f71eedd1db9d87b78c5f9c0.cu
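// ---------------------------------------------------------------------------
// Orientation (an illustrative sketch, not part of the AN5D-generated output):
// the kernels in this file are time-tiled implementations of a radius-1 2D box
// stencil ("box2d1r"). kernel0_N fuses N time steps per launch; A holds a
// ping-pong pair of dimsize x dimsize grids selected by (c0 % 2). For each
// fused step k, the three registers __reg_k_{0,1,2} rotate partial row sums
// for rows h-1, h, and h+1, while the double-buffered shared-memory line
// __a_sb_double supplies the column (c2) neighbors; __writeValidN shrinks the
// valid region by one halo column per fused step. The hypothetical reference
// kernel below computes one plain, untiled step of the same stencil so the
// weight layout recovered from __CALCEXPR_{0,1,2} is visible. The name and
// launch shape here are illustrative, not AN5D's.
__global__ void box2d1r_reference_step(const double *in, double *out, int n)
{
  int r = blockIdx.y * blockDim.y + threadIdx.y;
  int c = blockIdx.x * blockDim.x + threadIdx.x;
  if (r < 1 || r >= n - 1 || c < 1 || c >= n - 1) return;  // interior points only
  // __CALCEXPR_0 weights apply to the row above, __CALCEXPR_1 to the center
  // row, __CALCEXPR_2 to the row below (the generated code keeps the float
  // literal suffixes even though the data is double).
  out[r * n + c] =
      0.09371 * in[(r - 1) * n + (c - 1)] + 0.09374 * in[(r - 1) * n + c] + 0.09376 * in[(r - 1) * n + (c + 1)]
    + 0.09372 * in[r * n + (c - 1)]       + 0.25001 * in[r * n + c]       + 0.09377 * in[r * n + (c + 1)]
    + 0.09373 * in[(r + 1) * n + (c - 1)] + 0.09375 * in[(r + 1) * n + c] + 0.09378 * in[(r + 1) * n + (c + 1)];
}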
#include "box2d1r-512-16-512_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_16(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 16; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 480; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; double __reg_15_0; double __reg_15_1; double __reg_15_2; double __reg_16_0; double __reg_16_1; double __reg_16_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= 
(__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __writeValid16 = __updateValid && __local_c2 >= (__halo2 * 16) && __local_c2 < __side2LenOl - (__halo2 * 16); const AN5D_TYPE __storeValid = __writeValid16; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC15(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid15) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC16(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid16) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, 
__reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_0); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, 
__reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, 
__reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(1, __reg_16_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(2, __reg_16_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(3, __reg_16_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(4, __reg_16_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(5, __reg_16_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(6, __reg_16_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, 
__reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(7, __reg_16_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(8, __reg_16_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(9, __reg_16_0); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(10, __reg_16_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); 
__CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(11, __reg_16_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(12, __reg_16_0); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(13, __reg_16_1); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(14, __reg_16_2); __LOAD(__reg_0, 31); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, 
__reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(15, __reg_16_0); __LOAD(__reg_0, 32); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(16, __reg_16_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, 
__reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, 
__reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, 
__reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __LOAD(__reg_0, 31); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __LOAD(__reg_0, 32); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(16, __reg_16_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 33; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, 
__reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, 
__reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, 
__reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); 
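/* Pipeline drain (descriptive note, not part of the generated code): once the
   final input row has been loaded, each remaining stage k is flushed by
   clamping its edge register (__reg_k_0 = __reg_(k-1)_0) and replaying the
   tail of the __CALC/__STORE chain, emitting one output row per step until
   all 16 in-flight rows (__h - 16 .. __h - 1) have been written. */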
__CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_14_0 = __reg_13_0; __CALC15(__reg_15_1, __reg_15_1, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_15_0 = __reg_14_0; __CALC16(__reg_16_1, __reg_16_1, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_2_1 = 
__reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, 
__reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_14_1 = __reg_13_1; __CALC15(__reg_15_2, __reg_15_2, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); __reg_15_1 = __reg_14_1; __CALC16(__reg_16_2, __reg_16_2, __reg_16_0, __reg_15_1); __STORE(__h + 0, __reg_16_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); 
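/* Note: this `else if` is one of three unrolled remainder arms (__h + 1,
   __h + 2, __h + 3 rows left after the steady-state loop, which advances by
   3 rows per iteration); the arms differ only in how many extra rows are
   loaded before the same drain sequence runs. */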
__CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 15, __reg_16_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 14, __reg_16_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 13, __reg_16_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, 
__reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 12, __reg_16_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 11, __reg_16_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 10, __reg_16_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 9, __reg_16_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 8, __reg_16_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); 
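/* Each stage k keeps a rotating three-register window (__reg_k_0/1/2) over
   consecutive rows; assignments such as __reg_7_2 = __reg_6_2 clamp the last
   value at the block edge so the drain proceeds without further loads. */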
__CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 7, __reg_16_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 6, __reg_16_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 5, __reg_16_1); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 4, __reg_16_2); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 3, __reg_16_0); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 2, __reg_16_1); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 1, __reg_16_2); __reg_14_2 = __reg_13_2; __CALC15(__reg_15_0, __reg_15_0, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h + 0, __reg_16_0); __reg_15_2 = __reg_14_2; __CALC16(__reg_16_0, __reg_16_0, __reg_16_1, __reg_15_2); __STORE(__h + 1, __reg_16_1); } } else { for (__h = 33; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, 
__reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __CALC16(__reg_16_1, __reg_16_0, __reg_16_2, __reg_15_0); __STORE(__h - 16, __reg_16_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, 
__reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __CALC16(__reg_16_2, __reg_16_1, __reg_16_0, __reg_15_1); __STORE(__h - 16, __reg_16_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __CALC16(__reg_16_0, __reg_16_2, __reg_16_1, __reg_15_2); __STORE(__h - 16, __reg_16_1); __h++; } } __global__ void kernel0_15(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 15; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 482; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double 
__reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; double __reg_15_0; double __reg_15_1; double __reg_15_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __writeValid15 = __updateValid && __local_c2 >= (__halo2 * 15) && __local_c2 < __side2LenOl - (__halo2 * 15); const AN5D_TYPE __storeValid = __writeValid15; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define 
__CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC15(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid15) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, 
__reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, 
__reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(1, __reg_15_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(2, __reg_15_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(3, __reg_15_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); 
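/* Hedged reconstruction of the per-time-step update encoded by
   __CALCEXPR_0/1/2 above (row-major A assumed):

     A_next[c1][c2] = 0.09371*A[c1-1][c2-1] + 0.09374*A[c1-1][c2] + 0.09376*A[c1-1][c2+1]
                    + 0.09372*A[c1  ][c2-1] + 0.25001*A[c1  ][c2] + 0.09377*A[c1  ][c2+1]
                    + 0.09373*A[c1+1][c2-1] + 0.09375*A[c1+1][c2] + 0.09378*A[c1+1][c2+1];

   __CALCEXPR_0/1/2 contribute one stencil row each (columns read via the
   shared-memory buffer __a_sb, the centre via the register), and
   __CALC1..__CALC15 apply this update once per fused time step as a value
   flows down the pipeline. */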
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(4, __reg_15_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(5, __reg_15_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(6, __reg_15_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(7, __reg_15_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(8, __reg_15_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(9, __reg_15_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(10, __reg_15_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(11, __reg_15_2); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); 
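/* First-block prologue (__c1Id == 0): rows 0..30 prime the 15-stage
   pipeline, and from row 16 onward every additional load completes one fully
   updated output row, hence the interleaved __STORE(1) .. __STORE(15). */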
__CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(12, __reg_15_0); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(13, __reg_15_1); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(14, __reg_15_2); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(15, __reg_15_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
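// (register-pipeline warm-up for interior thread blocks: each __LOAD below feeds one more
// input row into the chain of fused stencil stages, so newer rows sit in shallower stages
// until all 15 time steps hold valid register triples)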
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, 
__reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, 
__reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __LOAD(__reg_0, 29); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __LOAD(__reg_0, 30); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(15, __reg_15_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 31; __h <= __c1Len - 
__side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, 
__reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, 
__reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_14_1 = __reg_13_1; __CALC15(__reg_15_2, __reg_15_2, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); 
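// (pipeline drain for the two-rows-remaining case: no further loads; the last loaded values
// are propagated down the remaining stages so every fused time step finishes its final rows)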
__CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, 
__reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, 
__reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); __reg_14_2 = __reg_13_2; __CALC15(__reg_15_0, __reg_15_0, __reg_15_1, __reg_14_2); __STORE(__h + 0, __reg_15_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 14, __reg_15_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, 
__reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 13, __reg_15_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 12, __reg_15_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 11, __reg_15_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 10, __reg_15_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 9, __reg_15_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, 
__reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 8, __reg_15_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 7, __reg_15_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 6, __reg_15_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 5, __reg_15_2); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 4, __reg_15_0); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 3, __reg_15_1); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 2, __reg_15_2); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 1, __reg_15_0); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h + 0, __reg_15_1); __reg_14_0 = __reg_13_0; __CALC15(__reg_15_1, __reg_15_1, __reg_15_2, __reg_14_0); __STORE(__h + 1, __reg_15_2); } } else { for (__h = 31; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, 
__reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __CALC15(__reg_15_0, __reg_15_2, __reg_15_1, __reg_14_2); __STORE(__h - 15, __reg_15_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
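// (loop remainder: after the 3-rows-per-iteration steady-state loop above, the trailing rows
// are processed one at a time, returning as soon as __h reaches __side1LenOl)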
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __CALC15(__reg_15_1, __reg_15_0, __reg_15_2, __reg_14_0); __STORE(__h - 15, __reg_15_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __CALC15(__reg_15_2, __reg_15_1, __reg_15_0, __reg_14_1); __STORE(__h - 15, __reg_15_0); __h++; } } __global__ void kernel0_14(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 14; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 484; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double 
__reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; double __reg_13_0; double __reg_13_1; double __reg_13_2; double __reg_14_0; double __reg_14_1; double __reg_14_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13); const AN5D_TYPE __writeValid14 = __updateValid && __local_c2 >= (__halo2 * 14) && __local_c2 < __side2LenOl - (__halo2 * 14); const AN5D_TYPE __storeValid = __writeValid14; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC14(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid14) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, 
__reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(1, 
__reg_14_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(2, __reg_14_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(3, __reg_14_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(4, __reg_14_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(5, __reg_14_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, 
__reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(6, __reg_14_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(7, __reg_14_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(8, __reg_14_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(9, __reg_14_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); 
__CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(10, __reg_14_1); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(11, __reg_14_2); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(12, __reg_14_0); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(13, __reg_14_1); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(14, __reg_14_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); 
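/* Interior c1-blocks (__c1Id != 0) run the same 29-row pipeline warm-up, but only row 14 is
   written back at the end of this prologue (a single __STORE(14, ...) just before the
   __DB_SWITCH()); the rows above it lie in the overlap region recomputed from the previous block. */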
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, 
__reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, 
__reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, 
__reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __LOAD(__reg_0, 27); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __LOAD(__reg_0, 28); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(14, __reg_14_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 29; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, 
__reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, 
__reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); 
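/* Pipeline drain for the last c1-block (entered when exactly one input row remained): the dead
   "if (0) {}" heading this chain is a code-generator idiom that lets every tail case be emitted
   uniformly as "else if". With no further __LOADs, each stage k flushes its last buffered row via
   __CALCk(r, r, ...) -- note the repeated first argument -- and one finished row is stored per
   step until all 14 stages are empty. */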
__STORE(__h - 4, __reg_14_1); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_13_2 = __reg_12_2; __CALC14(__reg_14_0, __reg_14_0, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, 
__reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); 
__CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 4, __reg_14_1); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); __reg_13_0 = __reg_12_0; __CALC14(__reg_14_1, __reg_14_1, __reg_14_2, __reg_13_0); __STORE(__h + 0, __reg_14_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 13, __reg_14_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 12, __reg_14_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); 
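/* Plain register copies such as "__reg_1_1 = __reg_0;" stand in for a __CALCk call past the block
   edge: the newest value is handed down one pipeline stage unchanged, mirroring the
   "else out1 = reg" fallback inside __CALCk. */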
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 11, __reg_14_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 10, __reg_14_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 9, __reg_14_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 8, __reg_14_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 7, __reg_14_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, 
__reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 6, __reg_14_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 5, __reg_14_0); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 4, __reg_14_1); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 3, __reg_14_2); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 2, __reg_14_0); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 1, __reg_14_1); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h + 0, __reg_14_2); __reg_13_1 = __reg_12_1; __CALC14(__reg_14_2, __reg_14_2, __reg_14_0, __reg_13_1); __STORE(__h + 1, __reg_14_0); } } else { for (__h = 29; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); 
__CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __CALC14(__reg_14_2, __reg_14_1, __reg_14_0, __reg_13_1); __STORE(__h - 14, __reg_14_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __CALC14(__reg_14_0, __reg_14_2, __reg_14_1, __reg_13_2); __STORE(__h - 14, __reg_14_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, 
__reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __CALC14(__reg_14_1, __reg_14_0, __reg_14_2, __reg_13_0); __STORE(__h - 14, __reg_14_2); __h++;
}
}

/* Same 9-point stencil, specialized for 13 fused time steps per sweep (__side0Len = 13): one
   register-pipeline stage fewer than kernel0_14 above, with a correspondingly smaller halo
   overlap (__OlLen1/__OlLen2). */
__global__ void kernel0_13(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __side0Len = 13;
const AN5D_TYPE __side1Len = 512;
const AN5D_TYPE __side2Len = 486;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __blockSize = 1 * __side2LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num;
const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0;
double __reg_1_0; double __reg_1_1; double __reg_1_2;
double __reg_2_0; double __reg_2_1; double __reg_2_2;
double __reg_3_0; double __reg_3_1; double __reg_3_2;
double __reg_4_0; double __reg_4_1; double __reg_4_2;
double __reg_5_0; double __reg_5_1; double __reg_5_2;
double __reg_6_0; double __reg_6_1; double __reg_6_2;
double __reg_7_0; double __reg_7_1; double __reg_7_2;
double __reg_8_0; double __reg_8_1; double __reg_8_2;
double __reg_9_0; double __reg_9_1; double __reg_9_2;
double __reg_10_0; double __reg_10_1; double __reg_10_2;
double __reg_11_0; double __reg_11_1; double __reg_11_2;
double __reg_12_0; double __reg_12_1; double __reg_12_2;
double __reg_13_0; double __reg_13_1; double __reg_13_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4);
const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5);
const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6);
const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7);
const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8);
const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9);
const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10);
const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11);
const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12);
const AN5D_TYPE __writeValid13 = __updateValid && __local_c2 >= (__halo2 * 13) && __local_c2 < __side2LenOl - (__halo2 * 13);
const AN5D_TYPE __storeValid = __writeValid13;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC13(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid13) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) {
__LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_0);
__LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1,
__reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, 
__reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(1, __reg_13_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(2, __reg_13_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(3, __reg_13_0); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(4, __reg_13_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(5, __reg_13_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(6, __reg_13_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(7, __reg_13_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, 
__reg_12_0); __STORE(8, __reg_13_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(9, __reg_13_0); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(10, __reg_13_1); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(11, __reg_13_2); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(12, __reg_13_0); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); 
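// Big picture (hedged reading of the generated code): kernel0_13 appears to be
// AN5D-style temporally blocked stencil code. Thirteen time steps of a 3x3
// (9-point) stencil -- coefficients 0.09371..0.09378 with center 0.25001 --
// are fused per kernel pass, streamed along c1 through a 13-stage register
// pipeline with a shared-memory double buffer along c2; each __writeValidK
// predicate shrinks the valid column range by one halo per fused step.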
__CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(13, __reg_13_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, 
__reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); 
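// This else-branch is the warm-up for interior tiles (__c1Id != 0): no clamped
// top-boundary value is replayed, and each of the 13 fused stages
// (__CALC1..__CALC13) is primed two input rows after the previous one, so the
// first output row is not stored until the whole register pipeline is full
// (__STORE(13, ...)).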
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __LOAD(__reg_0, 25); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __LOAD(__reg_0, 26); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, 
__reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(13, __reg_13_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 27; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); 
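// The (__h + k == __c1Len - __side1Len * __c1Id + __halo1 * 2) cases drain the
// pipeline at the bottom tile edge: the last loaded register is forwarded
// unchanged through the remaining stages (e.g. __reg_1_0 = __reg_0;), letting
// each fused time step emit its trailing rows against a clamped halo.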
__CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_12_0 = __reg_11_0; __CALC13(__reg_13_1, __reg_13_1, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, 
__reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); 
__CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); __reg_12_1 = __reg_11_1; __CALC13(__reg_13_2, __reg_13_2, __reg_13_0, __reg_12_1); __STORE(__h + 0, __reg_13_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 12, __reg_13_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); 
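// Same drain pattern as the (__h + 1) and (__h + 2) cases above, shifted by
// one more pending row; the main loop advances __h three rows per trip, so up
// to three such remainder cases are needed.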
__CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 11, __reg_13_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 10, __reg_13_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 9, __reg_13_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 8, __reg_13_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 7, __reg_13_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 6, __reg_13_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 5, __reg_13_1); __reg_7_2 = 
__reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 4, __reg_13_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 3, __reg_13_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 2, __reg_13_1); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 1, __reg_13_2); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h + 0, __reg_13_0); __reg_12_2 = __reg_11_2; __CALC13(__reg_13_0, __reg_13_0, __reg_13_1, __reg_12_2); __STORE(__h + 1, __reg_13_1); } } else { for (__h = 27; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); 
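// Steady state for interior tiles: each unrolled iteration loads one row,
// advances all 13 register-rotated stages, and stores 13 rows behind the load
// front (__STORE(__h - 13, ...)). The three-way unrolling matches the period
// of the register rotation, and the trailing __DB_SWITCH(); __syncthreads();
// restores the shared-memory double-buffer parity once per trip.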
__CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __CALC13(__reg_13_1, __reg_13_0, __reg_13_2, __reg_12_0); __STORE(__h - 13, __reg_13_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __CALC13(__reg_13_2, __reg_13_1, __reg_13_0, __reg_12_1); __STORE(__h - 13, __reg_13_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __CALC13(__reg_13_0, __reg_13_2, __reg_13_1, __reg_12_2); __STORE(__h - 13, __reg_13_1); __h++; } } __global__ void kernel0_12(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 12; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 488; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE 
__blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; double __reg_12_0; double __reg_12_1; double __reg_12_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __writeValid12 = __updateValid && __local_c2 >= (__halo2 * 12) && __local_c2 < __side2LenOl - (__halo2 * 12); const AN5D_TYPE __storeValid = __writeValid12; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC12(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid12) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); 
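// First tile along c1 (__c1Id == 0): row 0 is broadcast into every stage's
// register window below, which appears to replicate the lower boundary into
// all 12 fused time steps before rows 1..24 stream in to fill the pipeline.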
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, 
__reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(1, __reg_12_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(2, __reg_12_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(3, __reg_12_0); __LOAD(__reg_0, 16); 
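// The 12-stage pipeline is full from row 13 onward: each further __LOAD
// advances every __CALCk stage by one row and __STOREs one finished output
// row (rows 4, 5, ... follow below).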
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(4, __reg_12_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(5, __reg_12_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(6, __reg_12_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(7, __reg_12_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(8, __reg_12_2); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, 
__reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(9, __reg_12_0); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(10, __reg_12_1); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(11, __reg_12_2); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(12, __reg_12_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); 
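// Interior-tile warm-up (__c1Id != 0): no boundary seeding here; each stage
// __CALCk first fires a fixed number of loads after stage k-1, so nothing is
// written back until __STORE(12, ...) once row 24 is resident.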
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, 
__reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, 
__reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __LOAD(__reg_0, 23); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __LOAD(__reg_0, 24); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(12, __reg_12_0); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 25; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, 
__reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, 
__reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, __reg_12_2); __reg_11_1 = __reg_10_1; __CALC12(__reg_12_2, __reg_12_2, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, 
__reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, 
__reg_12_2); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); __reg_11_2 = __reg_10_2; __CALC12(__reg_12_0, __reg_12_0, __reg_12_1, __reg_11_2); __STORE(__h + 0, __reg_12_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 11, __reg_12_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 10, __reg_12_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 9, __reg_12_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, 
__reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 8, __reg_12_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 7, __reg_12_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 6, __reg_12_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 5, __reg_12_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 4, __reg_12_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 3, __reg_12_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 2, __reg_12_2); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 1, __reg_12_0); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h + 0, __reg_12_1); __reg_11_0 = __reg_10_0; __CALC12(__reg_12_1, __reg_12_1, __reg_12_2, __reg_11_0); __STORE(__h + 1, __reg_12_2); } } else { for (__h = 25; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); 
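// Steady state for tiles that are not last along c1: stages 7..12 continue
// below. The loop body is unrolled 3x to match the modulo-3 register
// rotation, loading row __h and storing finished row (__h - 12) per step.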
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __CALC12(__reg_12_0, __reg_12_2, __reg_12_1, __reg_11_2); __STORE(__h - 12, __reg_12_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __CALC12(__reg_12_1, __reg_12_0, __reg_12_2, __reg_11_0); __STORE(__h - 12, __reg_12_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, 
__reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __CALC12(__reg_12_2, __reg_12_1, __reg_12_0, __reg_11_1); __STORE(__h - 12, __reg_12_0); __h++; } } __global__ void kernel0_11(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 11; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 490; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; double __reg_11_0; double __reg_11_1; double __reg_11_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const 
AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __writeValid11 = __updateValid && __local_c2 >= (__halo2 * 11) && __local_c2 < __side2LenOl - (__halo2 * 11); const AN5D_TYPE __storeValid = __writeValid11; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC11(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid11) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); 
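// Pipeline priming, top tile (__c1Id == 0): row 0 is replicated into every stage to handle the
// boundary, then rows 1..22 stream in one per step; the first __STORE fires once stage 11 fills.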
__CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(1, __reg_11_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, 
__reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(2, __reg_11_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(3, __reg_11_0); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(4, __reg_11_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(5, __reg_11_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(6, __reg_11_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, 
__reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(7, __reg_11_1); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(8, __reg_11_2); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(9, __reg_11_0); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(10, __reg_11_1); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(11, __reg_11_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, 
__reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __LOAD(__reg_0, 21); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __LOAD(__reg_0, 22); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); 
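// Interior-tile prologue ends below with __STORE(11, ...); the steady-state loop then loads one
// row per iteration and retires one __STORE 11 rows behind, unrolled 3x so the register triples
// rotate back into their starting roles.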
__CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(11, __reg_11_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 23; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, 
__reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_10_2 = __reg_9_2; __CALC11(__reg_11_0, __reg_11_0, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, 
__reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); 
__CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); __reg_10_0 = __reg_9_0; __CALC11(__reg_11_1, __reg_11_1, __reg_11_2, __reg_10_0); __STORE(__h + 0, __reg_11_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 10, __reg_11_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 9, __reg_11_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 8, __reg_11_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); 
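// Pipeline drain: past the last loadable row, the bottom boundary value is replayed through the
// remaining stages (e.g. __reg_1_1 = __reg_0) so every pending __STORE still retires.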
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 7, __reg_11_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 6, __reg_11_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 5, __reg_11_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 4, __reg_11_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 3, __reg_11_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 2, __reg_11_0); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 1, __reg_11_1); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h + 0, __reg_11_2); __reg_10_1 = __reg_9_1; __CALC11(__reg_11_2, __reg_11_2, __reg_11_0, __reg_10_1); __STORE(__h + 1, __reg_11_0); } } else { for (__h = 23; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, 
__reg_11_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __CALC11(__reg_11_2, __reg_11_1, __reg_11_0, __reg_10_1); __STORE(__h - 11, __reg_11_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __CALC11(__reg_11_0, __reg_11_2, __reg_11_1, __reg_10_2); __STORE(__h - 11, __reg_11_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __CALC11(__reg_11_1, __reg_11_0, __reg_11_2, __reg_10_0); __STORE(__h - 11, __reg_11_2); __h++; } } __global__ void kernel0_10(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE 
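// kernel0_10: identical 3x3 stencil body, but fuses 10 time steps per launch (__side0Len = 10),
// trading one pipeline stage for a slightly wider useful tile (__side2Len = 492 vs. 490).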
__c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 10; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 492; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; double __reg_10_0; double __reg_10_1; double __reg_10_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __writeValid10 = __updateValid && __local_c2 >= (__halo2 * 10) && __local_c2 < __side2LenOl - (__halo2 * 10); const AN5D_TYPE __storeValid = __writeValid10; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, 
(int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC10(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid10) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, 
__reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, 
__reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(1, __reg_10_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(2, __reg_10_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(3, __reg_10_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(4, __reg_10_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(5, __reg_10_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(6, __reg_10_0); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, 
__reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(7, __reg_10_1); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(8, __reg_10_2); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(9, __reg_10_0); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(10, __reg_10_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
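// Interior tile: no boundary replication here; rows stream in until all 10 stages hold valid
// data, so the first (and only prologue) __STORE(10, ...) happens after row 20 is loaded.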
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __LOAD(__reg_0, 19); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __LOAD(__reg_0, 20); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(10, __reg_10_1); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 21; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, 
__reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, 
__reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_9_0 = __reg_8_0; __CALC10(__reg_10_1, __reg_10_1, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, 
__reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); __reg_9_1 = __reg_8_1; __CALC10(__reg_10_2, __reg_10_2, __reg_10_0, __reg_9_1); __STORE(__h + 0, __reg_10_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 9, __reg_10_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 8, __reg_10_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 7, __reg_10_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, 
__reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 6, __reg_10_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 5, __reg_10_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 4, __reg_10_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 3, __reg_10_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 2, __reg_10_1); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 1, __reg_10_2); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h + 0, __reg_10_0); __reg_9_2 = __reg_8_2; __CALC10(__reg_10_0, __reg_10_0, __reg_10_1, __reg_9_2); __STORE(__h + 1, __reg_10_1); } } else { for (__h = 21; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, 
__reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __CALC10(__reg_10_1, __reg_10_0, __reg_10_2, __reg_9_0); __STORE(__h - 10, __reg_10_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __CALC10(__reg_10_2, __reg_10_1, __reg_10_0, __reg_9_1); __STORE(__h - 10, __reg_10_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __CALC10(__reg_10_0, __reg_10_2, __reg_10_1, __reg_9_2); __STORE(__h - 10, __reg_10_1); __h++; } } __global__ void kernel0_9(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 9; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 494; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double 
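// NOTE (editorial, inferred): kernel0_9 is the 9-stage variant of the same
// AN5D-style temporally blocked stencil sweep — __side0Len = 9 time steps are
// fused per pass, carried by nine rotating register triples (__reg_1_* ..
// __reg_9_*) declared below, with a 9-row halo (__OlLen1 = __halo1 * __side0Len)
// on each side of the tile.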
__reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; double __reg_9_0; double __reg_9_1; double __reg_9_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __writeValid9 = __updateValid && __local_c2 >= (__halo2 * 9) && __local_c2 < __side2LenOl - (__halo2 * 9); const AN5D_TYPE __storeValid = __writeValid9; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC9(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid9) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 
5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(1, __reg_9_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(2, __reg_9_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(3, __reg_9_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, 
__reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(4, __reg_9_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(5, __reg_9_2); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(6, __reg_9_0); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(7, __reg_9_1); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(8, __reg_9_2); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(9, __reg_9_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, 
__reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, 
__reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __LOAD(__reg_0, 17); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __LOAD(__reg_0, 18); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(9, __reg_9_0); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 19; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, 
__reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_8_1 = __reg_7_1; __CALC9(__reg_9_2, __reg_9_2, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, 
__reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); __reg_8_2 = __reg_7_2; __CALC9(__reg_9_0, __reg_9_0, __reg_9_1, __reg_8_2); __STORE(__h + 0, __reg_9_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, 
__reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 8, __reg_9_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 7, __reg_9_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 6, __reg_9_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 5, __reg_9_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 4, __reg_9_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 3, __reg_9_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 2, __reg_9_2); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 1, __reg_9_0); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h + 0, __reg_9_1); __reg_8_0 = __reg_7_0; __CALC9(__reg_9_1, __reg_9_1, __reg_9_2, __reg_8_0); __STORE(__h + 1, __reg_9_2); } } else { for (__h = 19; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, 
__reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __CALC9(__reg_9_0, __reg_9_2, __reg_9_1, __reg_8_2); __STORE(__h - 9, __reg_9_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __CALC9(__reg_9_1, __reg_9_0, __reg_9_2, __reg_8_0); __STORE(__h - 9, __reg_9_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __CALC9(__reg_9_2, __reg_9_1, __reg_9_0, __reg_8_1); __STORE(__h - 9, __reg_9_0); __h++; } } __global__ void kernel0_8(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 8; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 496; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = 
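// NOTE (editorial, inferred): kernel0_8 instantiates the same template with
// __side0Len = 8 fused time steps; __side2Len widens from 494 to 496 so that
// __side2LenOl (= __side2Len + 2 * __halo2 * __side0Len) stays at 512, keeping
// the shared-memory tile and block size identical across the kernel variants.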
(__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2; double __reg_8_0; double __reg_8_1; double __reg_8_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __writeValid8 = __updateValid && __local_c2 >= (__halo2 * 8) && __local_c2 < __side2LenOl - (__halo2 * 8); const AN5D_TYPE __storeValid = __writeValid8; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC8(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid8) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 
3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(1, __reg_8_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(2, __reg_8_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(3, __reg_8_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, 
__reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(4, __reg_8_1); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(5, __reg_8_2); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(6, __reg_8_0); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(7, __reg_8_1); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(8, __reg_8_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, 
__reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __LOAD(__reg_0, 15); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __LOAD(__reg_0, 16); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(8, __reg_8_2); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 17; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); 
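/* Steady-state phase (last tile along c1): each unrolled iteration __LOADs one
   input row, the __CALC1..__CALC8 chain advances one fused time step per stage,
   and __STORE retires the row that has passed through all 8 stages, trailing
   the load cursor by __side0Len rows (hence __STORE(__h - 8, ...)). */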
__CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 8, __reg_8_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 8, __reg_8_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_7_2 = __reg_6_2; __CALC8(__reg_8_0, __reg_8_0, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); } else if (__h + 2 == __c1Len - __side1Len * 
__c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); __reg_7_0 = __reg_6_0; __CALC8(__reg_8_1, __reg_8_1, __reg_8_2, __reg_7_0); __STORE(__h + 0, __reg_8_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
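/* Tail handling: with only 1-3 rows left in the tile, loading stops and the
   pipeline is drained by re-feeding the last value (e.g. __reg_1_0 = __reg_0;)
   so every row still in flight gets stored. */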
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 7, __reg_8_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 6, __reg_8_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 5, __reg_8_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 4, __reg_8_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 3, __reg_8_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 2, __reg_8_0); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 1, __reg_8_1); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h + 0, __reg_8_2); __reg_7_1 = __reg_6_1; __CALC8(__reg_8_2, __reg_8_2, __reg_8_0, __reg_7_1); __STORE(__h + 1, __reg_8_0); } } else { for (__h = 17; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __CALC8(__reg_8_2, __reg_8_1, __reg_8_0, __reg_7_1); __STORE(__h - 8, __reg_8_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); 
__CALC8(__reg_8_0, __reg_8_2, __reg_8_1, __reg_7_2); __STORE(__h - 8, __reg_8_1); __h++;
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __CALC8(__reg_8_1, __reg_8_0, __reg_8_2, __reg_7_0); __STORE(__h - 8, __reg_8_2); __h++; } }

__global__ void kernel0_7(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 7; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 498; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2; double __reg_7_0; double __reg_7_1; double __reg_7_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __writeValid7 = __updateValid && __local_c2 >= (__halo2 * 7) && __local_c2 < __side2LenOl - (__halo2 * 7); const AN5D_TYPE __storeValid = __writeValid7;
AN5D_TYPE __c1; AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC7(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid7) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0,
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(1, __reg_7_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(2, __reg_7_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(3, __reg_7_0); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(4, __reg_7_1); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(5, __reg_7_2); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(6, __reg_7_0); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(7, __reg_7_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); 
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __LOAD(__reg_0, 13); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __LOAD(__reg_0, 14); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(7, __reg_7_1); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 15; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; 
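/* kernel0_7: same register-pipeline scheme, 7 fused time steps per sweep
   (__side0Len = 7), so stores trail loads by 7 rows: __STORE(__h - 7, ...). */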
__LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, __reg_7_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_6_0 = __reg_5_0; __CALC7(__reg_7_1, __reg_7_1, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); 
__CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); __reg_6_1 = __reg_5_1; __CALC7(__reg_7_2, __reg_7_2, __reg_7_0, __reg_6_1); __STORE(__h + 0, __reg_7_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 6, __reg_7_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 5, __reg_7_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 4, __reg_7_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); 
__CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 3, __reg_7_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 2, __reg_7_1); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 1, __reg_7_2); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h + 0, __reg_7_0); __reg_6_2 = __reg_5_2; __CALC7(__reg_7_0, __reg_7_0, __reg_7_1, __reg_6_2); __STORE(__h + 1, __reg_7_1); } } else { for (__h = 15; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, __reg_7_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __CALC7(__reg_7_1, __reg_7_0, __reg_7_2, __reg_6_0); __STORE(__h - 7, __reg_7_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __CALC7(__reg_7_2, __reg_7_1, __reg_7_0, __reg_6_1); __STORE(__h - 7, __reg_7_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __CALC7(__reg_7_0, __reg_7_2, __reg_7_1, __reg_6_2); __STORE(__h - 7, 
__reg_7_1); __h++; } }

__global__ void kernel0_6(double *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 6; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 500; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; double __reg_6_0; double __reg_6_1; double __reg_6_2;
__shared__ double __a_sb_double[__blockSize * 2];
double *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __writeValid6 = __updateValid && __local_c2 >= (__halo2 * 6) && __local_c2 < __side2LenOl - (__halo2 * 6); const AN5D_TYPE __storeValid = __writeValid6;
AN5D_TYPE __c1; AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0)
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2])
#define __REGREF(reg, i2) reg
#define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC6(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid6) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(1, __reg_6_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(2, __reg_6_2); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(3, __reg_6_0); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(4, __reg_6_1); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(5, __reg_6_2); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(6, __reg_6_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); 
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __LOAD(__reg_0, 11); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __LOAD(__reg_0, 12); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(6, __reg_6_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 13; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); 
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_5_1 = __reg_4_1; __CALC6(__reg_6_2, __reg_6_2, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); __reg_5_2 = __reg_4_2; __CALC6(__reg_6_0, __reg_6_0, __reg_6_1, __reg_5_2); __STORE(__h + 0, __reg_6_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, 
__reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 5, __reg_6_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 4, __reg_6_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 3, __reg_6_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 2, __reg_6_2); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 1, __reg_6_0); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h + 0, __reg_6_1); __reg_5_0 = __reg_4_0; __CALC6(__reg_6_1, __reg_6_1, __reg_6_2, __reg_5_0); __STORE(__h + 1, __reg_6_2); } } else { for (__h = 13; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __CALC6(__reg_6_0, __reg_6_2, __reg_6_1, __reg_5_2); __STORE(__h - 6, __reg_6_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); 
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __CALC6(__reg_6_1, __reg_6_0, __reg_6_2, __reg_5_0); __STORE(__h - 6, __reg_6_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __CALC6(__reg_6_2, __reg_6_1, __reg_6_0, __reg_5_1); __STORE(__h - 6, __reg_6_0); __h++; } } __global__ void kernel0_5(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 5; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 502; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; double __reg_5_0; double __reg_5_1; double __reg_5_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __writeValid5 = __updateValid && __local_c2 >= (__halo2 * 5) && __local_c2 < __side2LenOl - (__halo2 * 5); const AN5D_TYPE __storeValid = __writeValid5; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define 
__CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC5(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid5) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); 
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(1, __reg_5_1); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(2, __reg_5_2); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(3, __reg_5_0); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(4, __reg_5_1); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(5, __reg_5_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __LOAD(__reg_0, 9); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __LOAD(__reg_0, 10); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(5, __reg_5_2); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 11; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_4_2 = __reg_3_2; __CALC5(__reg_5_0, __reg_5_0, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, 
__reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); __reg_4_0 = __reg_3_0; __CALC5(__reg_5_1, __reg_5_1, __reg_5_2, __reg_4_0); __STORE(__h + 0, __reg_5_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 4, __reg_5_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 3, __reg_5_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 2, __reg_5_0); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 1, __reg_5_1); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h + 0, __reg_5_2); __reg_4_1 = __reg_3_1; __CALC5(__reg_5_2, __reg_5_2, __reg_5_0, __reg_4_1); __STORE(__h + 1, __reg_5_0); } } else { for (__h = 11; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); 
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __CALC5(__reg_5_2, __reg_5_1, __reg_5_0, __reg_4_1); __STORE(__h - 5, __reg_5_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __CALC5(__reg_5_0, __reg_5_2, __reg_5_1, __reg_4_2); __STORE(__h - 5, __reg_5_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __CALC5(__reg_5_1, __reg_5_0, __reg_5_2, __reg_4_0); __STORE(__h - 5, __reg_5_2); __h++; } } __global__ void kernel0_4(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 4; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 504; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; double __reg_4_0; double __reg_4_1; double __reg_4_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4); const AN5D_TYPE __storeValid = __writeValid4; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define 
__CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, 
__reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(1, __reg_4_1); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(2, __reg_4_2); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(3, __reg_4_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(4, __reg_4_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __LOAD(__reg_0, 8); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(4, __reg_4_1); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); 
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_3_0 = __reg_2_0; __CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); __reg_3_1 = __reg_2_1; __CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1); __STORE(__h + 0, __reg_4_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 3, __reg_4_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 2, __reg_4_1); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 1, __reg_4_2); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h + 0, __reg_4_0); __reg_3_2 = __reg_2_2; __CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2); __STORE(__h + 1, __reg_4_1); } } else { for (__h = 9; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; 
__LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0); __STORE(__h - 4, __reg_4_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1); __STORE(__h - 4, __reg_4_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2); __STORE(__h - 4, __reg_4_1); __h++; } } __global__ void kernel0_3(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 3; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 506; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; double __reg_3_0; double __reg_3_1; double __reg_3_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3); const AN5D_TYPE __storeValid = __writeValid3; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) 
#define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(1, __reg_3_1); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(2, __reg_3_2); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); 
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(3, __reg_3_0); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(3, __reg_3_0); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 0; if (__c1Id == __side1Num - 1) { for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __reg_2_1 = __reg_1_1; __CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); __reg_2_2 = __reg_1_2; __CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2); __STORE(__h + 0, __reg_3_1); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __LOAD(__reg_0, __h + 1); 
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 2, __reg_3_2); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 1, __reg_3_0); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h + 0, __reg_3_1); __reg_2_0 = __reg_1_0; __CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0); __STORE(__h + 1, __reg_3_2); } } else { for (__h = 7; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2); __STORE(__h - 3, __reg_3_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0); __STORE(__h - 3, __reg_3_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1); __STORE(__h - 3, __reg_3_0); __h++; } } __global__ void kernel0_2(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 2; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 508; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_2_0; double __reg_2_1; double __reg_2_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = 
__a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2); const AN5D_TYPE __storeValid = __writeValid2; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(1, __reg_2_1); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(2, __reg_2_2); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __LOAD(__reg_0, 3); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __LOAD(__reg_0, 4); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(2, __reg_2_2); __DB_SWITCH(); __syncthreads(); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __reg_1_2 = __reg_0; __CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); } else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); __reg_1_0 = __reg_0; __CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0); __STORE(__h + 0, __reg_2_2); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_0, 
__reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 1, __reg_2_1); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h + 0, __reg_2_2); __reg_1_1 = __reg_0; __CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1); __STORE(__h + 1, __reg_2_0); } } else { for (__h = 5; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1); __STORE(__h - 2, __reg_2_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2); __STORE(__h - 2, __reg_2_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0); __STORE(__h - 2, __reg_2_2); __h++; } } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 1 - 1); const AN5D_TYPE __c1Pad = (1); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 1 - 1); const AN5D_TYPE __c2Pad = (1); #define __c2 c2 const AN5D_TYPE __halo1 = 1; const AN5D_TYPE __halo2 = 1; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 512; const AN5D_TYPE __side2Len = 510; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __blockSize = 1 * __side2LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num; const AN5D_TYPE __c2 = (blockIdx.x % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = 
A[((__c0 % 2) * dimsize + __c1) * dimsize + __c2]; }} while (0) #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = (((((((((0.09371f * (__SBREF(__a_sb, -1))) + (0.09374f * (__REGREF(__a, 0)))) + (0.09376f * (__SBREF(__a_sb, 1)))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = (((((((0.09372f * (__SBREF(__a_sb, -1)))) + (0.25001f * (__REGREF(__a, 0)))) + (0.09377f * (__SBREF(__a_sb, 1))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[(((c0 + 1) % 2) * dimsize + c1) * dimsize + c2]) #define __REGREF(reg, i2) reg #define __SBREF(sb, i2) __sbref_wrap(sb, (int)__tid + i2) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = ((((0.09373f * (__SBREF(__a_sb, -1)))) + (0.09375f * (__REGREF(__a, 0)))) + (0.09378f * (__SBREF(__a_sb, 1)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0); #define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(1, __reg_1_1); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(1, __reg_1_1); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); } else if (__h + 2 == 
__c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } } else { for (__h = 3; __h <= __side1LenOl - 3;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; __DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0); __STORE(__h - 1, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 1, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 1, __reg_1_1); __h++; } }
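// Editor's note: the generated kernels above (kernel0_3 / kernel0_2 / kernel0_1)
// all pipeline the same 3x3 stencil through rotating registers and a
// double-buffered shared-memory row; the macro layers obscure the arithmetic.
// Below is a minimal, unoptimized sketch of what one time step computes.
// The weight-to-row mapping is inferred from the __CALCEXPR_0/1/2 ordering
// and is illustrative, not extracted from the generator.
__global__ void jacobi9pt_naive(double *A, int dimsize, int c0)
{
    int c1 = blockIdx.y * blockDim.y + threadIdx.y;   // row
    int c2 = blockIdx.x * blockDim.x + threadIdx.x;   // column
    if (c1 < 1 || c1 >= dimsize - 1 || c2 < 1 || c2 >= dimsize - 1) return;
    // Two time planes live in A, selected by (c0 % 2), exactly as in the
    // __LOAD / __DEST macros above.
    const double *in = A + (c0 % 2) * dimsize * dimsize;
    double *out      = A + ((c0 + 1) % 2) * dimsize * dimsize;
    out[c1 * dimsize + c2] =
        0.09371f * in[(c1 - 1) * dimsize + (c2 - 1)] +
        0.09374f * in[(c1 - 1) * dimsize +  c2     ] +
        0.09376f * in[(c1 - 1) * dimsize + (c2 + 1)] +
        0.09372f * in[ c1      * dimsize + (c2 - 1)] +
        0.25001f * in[ c1      * dimsize +  c2     ] +
        0.09377f * in[ c1      * dimsize + (c2 + 1)] +
        0.09373f * in[(c1 + 1) * dimsize + (c2 - 1)] +
        0.09375f * in[(c1 + 1) * dimsize +  c2     ] +
        0.09378f * in[(c1 + 1) * dimsize + (c2 + 1)];
}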
e383085505ebc64e970f90f97bade06f095e8bc6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "reduce3.h" __device__ float update(float old,float opOutput,float *extraParams) { return fmaxf(fabsf(old),fabsf(opOutput)); } __device__ float merge(float old,float opOutput,float *extraParams) { return fmaxf(fabsf(old),fabsf(opOutput)); } __device__ float op(float d1,float d2,float *extraParams) { return fmaxf(fabsf(d1),fabsf(d2)); } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return fmaxf(fabsf(reduction),fabsf(result[0])); } extern "C" __global__ void normmax_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
e383085505ebc64e970f90f97bade06f095e8bc6.cu
#include "reduce3.h" __device__ float update(float old,float opOutput,float *extraParams) { return fmaxf(fabsf(old),fabsf(opOutput)); } __device__ float merge(float old,float opOutput,float *extraParams) { return fmaxf(fabsf(old),fabsf(opOutput)); } __device__ float op(float d1,float d2,float *extraParams) { return fmaxf(fabsf(d1),fabsf(d2)); } __device__ float postProcess(float reduction,int n,int xOffset,float *dx,int incx,float *extraParams,float *result) { return fmaxf(fabsf(reduction),fabsf(result[0])); } extern "C" __global__ void normmax_strided_float(int n, int xOffset,float *dx,int incx,float *extraParams,float *result) { transform(n,xOffset,dx,incx,extraParams,result); }
bc9ced1916f32fb2415e5ee4214231b5ff188f45.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <THH/THHAtomics.cuh> #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 256 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data, const int height, const int width, scalar_t y, scalar_t x) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (scalar_t)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (scalar_t)x_low; } else { x_high = x_low + 1; } scalar_t ly = y - y_low; scalar_t lx = x - x_low; scalar_t hy = 1. - ly; scalar_t hx = 1. - lx; // do bilinear interpolation scalar_t lt = bottom_data[y_low * width + x_low]; scalar_t rt = bottom_data[y_low * width + x_high]; scalar_t lb = bottom_data[y_high * width + x_low]; scalar_t rb = bottom_data[y_high * width + x_high]; scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb); return val; } template <typename scalar_t> __launch_bounds__(256) __global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data, const scalar_t *bottom_rois, const scalar_t spatial_scale, const int sample_num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, scalar_t *top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; // Force malformed ROIs to be 1x1 scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); scalar_t bin_size_h = roi_height / pooled_height; scalar_t bin_size_w = roi_width / pooled_width; const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; int sample_num_h = (sample_num > 0) ? sample_num : ceil(roi_height / pooled_height); // e.g., = 2 int sample_num_w = (sample_num > 0) ? 
sample_num : ceil(roi_width / pooled_width); scalar_t output_val = 0; for (int iy = 0; iy < sample_num_h; iy++) { const scalar_t y = roi_start_h + ph * bin_size_h + (scalar_t)(iy + scalar_t(.5f)) * bin_size_h / (scalar_t)(sample_num_h); for (int ix = 0; ix < sample_num_w; ix++) { const scalar_t x = roi_start_w + pw * bin_size_w + (scalar_t)(ix + scalar_t(.5f)) * bin_size_w / (scalar_t)(sample_num_w); scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data, height, width, y, x); output_val += val; } } output_val /= (sample_num_h * sample_num_w); top_data[index] = output_val; } } int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, at::Tensor output) { const int output_size = num_rois * pooled_height * pooled_width * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "ROIAlignLaucherForward", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); hipLaunchKernelGGL(( ROIAlignForward<scalar_t>) , dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, bottom_data, rois_data, scalar_t(spatial_scale), sample_num, channels, height, width, pooled_height, pooled_width, top_data); })); THCudaCheck(hipGetLastError()); return 1; } template <typename scalar_t> __device__ void bilinear_interpolate_gradient(const int height, const int width, scalar_t y, scalar_t x, scalar_t &w1, scalar_t &w2, scalar_t &w3, scalar_t &w4, int &x_low, int &x_high, int &y_low, int &y_high) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (scalar_t)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (scalar_t)x_low; } else { x_high = x_low + 1; } scalar_t ly = y - y_low; scalar_t lx = x - x_low; scalar_t hy = 1. - ly; scalar_t hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename scalar_t> __launch_bounds__(256) __global__ void ROIAlignBackward( const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois, const scalar_t spatial_scale, const int sample_num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, scalar_t *bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; // Force malformed ROIs to be 1x1 scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); scalar_t bin_size_h = roi_height / pooled_height; scalar_t bin_size_w = roi_width / pooled_width; scalar_t *offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int offset_top = (n * channels + c) * pooled_height * pooled_width + ph * pooled_width + pw; scalar_t offset_top_diff = top_diff[offset_top]; int sample_num_h = (sample_num > 0) ? sample_num : ceil(roi_height / pooled_height); // e.g., = 2 int sample_num_w = (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width); const scalar_t count = (scalar_t)(sample_num_h * sample_num_w); for (int iy = 0; iy < sample_num_h; iy++) { const scalar_t y = roi_start_h + ph * bin_size_h + (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h); for (int ix = 0; ix < sample_num_w; ix++) { const scalar_t x = roi_start_w + pw * bin_size_w + (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w); scalar_t w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient<scalar_t>( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); scalar_t g1 = offset_top_diff * w1 / count; scalar_t g2 = offset_top_diff * w2 / count; scalar_t g3 = offset_top_diff * w3 / count; scalar_t g4 = offset_top_diff * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); } } } } } int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, at::Tensor bottom_grad) { const int output_size = num_rois * pooled_height * pooled_width * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] { const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *bottom_diff = bottom_grad.data<scalar_t>(); if (sizeof(scalar_t) == sizeof(double)) { fprintf(stderr, "double is not supported\n"); exit(-1); } hipLaunchKernelGGL(( ROIAlignBackward<scalar_t>) , 
dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0, output_size, top_diff, rois_data, spatial_scale, sample_num, channels, height, width, pooled_height, pooled_width, bottom_diff); })); THCudaCheck(hipGetLastError()); return 1; }
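// Editor's note: a hypothetical call site for the launcher above (a sketch;
// the wrapper name, tensor shapes, and the 1/16 scale are assumptions, not
// taken from this file). feats is an [N, C, H, W] float CUDA tensor; rois is
// [R, 5] with rows of (batch_idx, x1, y1, x2, y2) in input-image coordinates.
at::Tensor roi_align_forward(const at::Tensor &feats, const at::Tensor &rois,
                             int pooled_h, int pooled_w)
{
    int num_rois = rois.size(0);
    int channels = feats.size(1), height = feats.size(2), width = feats.size(3);
    at::Tensor out = at::zeros({num_rois, channels, pooled_h, pooled_w},
                               feats.options());
    ROIAlignForwardLaucher(feats, rois, /*spatial_scale=*/1.0f / 16.0f,
                           /*sample_num=*/2, channels, height, width,
                           num_rois, pooled_h, pooled_w, out);
    return out;
}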
bc9ced1916f32fb2415e5ee4214231b5ff188f45.cu
#include <ATen/ATen.h> #include <THC/THCAtomics.cuh> #ifndef AT_CHECK #define AT_CHECK TORCH_CHECK #endif #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) #define THREADS_PER_BLOCK 256 inline int GET_BLOCKS(const int N) { int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; int max_block_num = 65000; return min(optimal_block_num, max_block_num); } template <typename scalar_t> __device__ scalar_t bilinear_interpolate(const scalar_t *bottom_data, const int height, const int width, scalar_t y, scalar_t x) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { return 0; } if (y <= 0) y = 0; if (x <= 0) x = 0; int y_low = (int)y; int x_low = (int)x; int y_high; int x_high; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (scalar_t)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (scalar_t)x_low; } else { x_high = x_low + 1; } scalar_t ly = y - y_low; scalar_t lx = x - x_low; scalar_t hy = 1. - ly; scalar_t hx = 1. - lx; // do bilinear interpolation scalar_t lt = bottom_data[y_low * width + x_low]; scalar_t rt = bottom_data[y_low * width + x_high]; scalar_t lb = bottom_data[y_high * width + x_low]; scalar_t rb = bottom_data[y_high * width + x_high]; scalar_t w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; scalar_t val = (w1 * lt + w2 * rt + w3 * lb + w4 * rb); return val; } template <typename scalar_t> __launch_bounds__(256) __global__ void ROIAlignForward(const int nthreads, const scalar_t *bottom_data, const scalar_t *bottom_rois, const scalar_t spatial_scale, const int sample_num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, scalar_t *top_data) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; // Force malformed ROIs to be 1x1 scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); scalar_t bin_size_h = roi_height / pooled_height; scalar_t bin_size_w = roi_width / pooled_width; const scalar_t *offset_bottom_data = bottom_data + (roi_batch_ind * channels + c) * height * width; int sample_num_h = (sample_num > 0) ? sample_num : ceil(roi_height / pooled_height); // e.g., = 2 int sample_num_w = (sample_num > 0) ? 
sample_num : ceil(roi_width / pooled_width); scalar_t output_val = 0; for (int iy = 0; iy < sample_num_h; iy++) { const scalar_t y = roi_start_h + ph * bin_size_h + (scalar_t)(iy + scalar_t(.5f)) * bin_size_h / (scalar_t)(sample_num_h); for (int ix = 0; ix < sample_num_w; ix++) { const scalar_t x = roi_start_w + pw * bin_size_w + (scalar_t)(ix + scalar_t(.5f)) * bin_size_w / (scalar_t)(sample_num_w); scalar_t val = bilinear_interpolate<scalar_t>(offset_bottom_data, height, width, y, x); output_val += val; } } output_val /= (sample_num_h * sample_num_w); top_data[index] = output_val; } } int ROIAlignForwardLaucher(const at::Tensor features, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, at::Tensor output) { const int output_size = num_rois * pooled_height * pooled_width * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( features.scalar_type(), "ROIAlignLaucherForward", ([&] { const scalar_t *bottom_data = features.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *top_data = output.data<scalar_t>(); ROIAlignForward<scalar_t> <<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>( output_size, bottom_data, rois_data, scalar_t(spatial_scale), sample_num, channels, height, width, pooled_height, pooled_width, top_data); })); THCudaCheck(cudaGetLastError()); return 1; } template <typename scalar_t> __device__ void bilinear_interpolate_gradient(const int height, const int width, scalar_t y, scalar_t x, scalar_t &w1, scalar_t &w2, scalar_t &w3, scalar_t &w4, int &x_low, int &x_high, int &y_low, int &y_high) { // deal with cases that inverse elements are out of feature map boundary if (y < -1.0 || y > height || x < -1.0 || x > width) { w1 = w2 = w3 = w4 = 0.; x_low = x_high = y_low = y_high = -1; return; } if (y <= 0) y = 0; if (x <= 0) x = 0; y_low = (int)y; x_low = (int)x; if (y_low >= height - 1) { y_high = y_low = height - 1; y = (scalar_t)y_low; } else { y_high = y_low + 1; } if (x_low >= width - 1) { x_high = x_low = width - 1; x = (scalar_t)x_low; } else { x_high = x_low + 1; } scalar_t ly = y - y_low; scalar_t lx = x - x_low; scalar_t hy = 1. - ly; scalar_t hx = 1. 
- lx; w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; return; } template <typename scalar_t> __launch_bounds__(256) __global__ void ROIAlignBackward( const int nthreads, const scalar_t *top_diff, const scalar_t *bottom_rois, const scalar_t spatial_scale, const int sample_num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, scalar_t *bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // (n, c, ph, pw) is an element in the aligned output int pw = index % pooled_width; int ph = (index / pooled_width) % pooled_height; int c = (index / pooled_width / pooled_height) % channels; int n = index / pooled_width / pooled_height / channels; const scalar_t *offset_bottom_rois = bottom_rois + n * 5; int roi_batch_ind = offset_bottom_rois[0]; scalar_t roi_start_w = offset_bottom_rois[1] * spatial_scale; scalar_t roi_start_h = offset_bottom_rois[2] * spatial_scale; scalar_t roi_end_w = (offset_bottom_rois[3] + 1) * spatial_scale; scalar_t roi_end_h = (offset_bottom_rois[4] + 1) * spatial_scale; // Force malformed ROIs to be 1x1 scalar_t roi_width = fmaxf((scalar_t)roi_end_w - roi_start_w, 0.); scalar_t roi_height = fmaxf((scalar_t)roi_end_h - roi_start_h, 0.); scalar_t bin_size_h = roi_height / pooled_height; scalar_t bin_size_w = roi_width / pooled_width; scalar_t *offset_bottom_diff = bottom_diff + (roi_batch_ind * channels + c) * height * width; int offset_top = (n * channels + c) * pooled_height * pooled_width + ph * pooled_width + pw; scalar_t offset_top_diff = top_diff[offset_top]; int sample_num_h = (sample_num > 0) ? sample_num : ceil(roi_height / pooled_height); // e.g., = 2 int sample_num_w = (sample_num > 0) ? sample_num : ceil(roi_width / pooled_width); const scalar_t count = (scalar_t)(sample_num_h * sample_num_w); for (int iy = 0; iy < sample_num_h; iy++) { const scalar_t y = roi_start_h + ph * bin_size_h + (scalar_t)(iy + .5f) * bin_size_h / (scalar_t)(sample_num_h); for (int ix = 0; ix < sample_num_w; ix++) { const scalar_t x = roi_start_w + pw * bin_size_w + (scalar_t)(ix + .5f) * bin_size_w / (scalar_t)(sample_num_w); scalar_t w1, w2, w3, w4; int x_low, x_high, y_low, y_high; bilinear_interpolate_gradient<scalar_t>( height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high); scalar_t g1 = offset_top_diff * w1 / count; scalar_t g2 = offset_top_diff * w2 / count; scalar_t g3 = offset_top_diff * w3 / count; scalar_t g4 = offset_top_diff * w4 / count; if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { atomicAdd(offset_bottom_diff + y_low * width + x_low, g1); atomicAdd(offset_bottom_diff + y_low * width + x_high, g2); atomicAdd(offset_bottom_diff + y_high * width + x_low, g3); atomicAdd(offset_bottom_diff + y_high * width + x_high, g4); } } } } } int ROIAlignBackwardLaucher(const at::Tensor top_grad, const at::Tensor rois, const float spatial_scale, const int sample_num, const int channels, const int height, const int width, const int num_rois, const int pooled_height, const int pooled_width, at::Tensor bottom_grad) { const int output_size = num_rois * pooled_height * pooled_width * channels; AT_DISPATCH_FLOATING_TYPES_AND_HALF( top_grad.scalar_type(), "ROIAlignLaucherBackward", ([&] { const scalar_t *top_diff = top_grad.data<scalar_t>(); const scalar_t *rois_data = rois.data<scalar_t>(); scalar_t *bottom_diff = bottom_grad.data<scalar_t>(); if (sizeof(scalar_t) == sizeof(double)) { fprintf(stderr, "double is not supported\n"); exit(-1); } ROIAlignBackward<scalar_t> <<<GET_BLOCKS(output_size), 
THREADS_PER_BLOCK>>>( output_size, top_diff, rois_data, spatial_scale, sample_num, channels, height, width, pooled_height, pooled_width, bottom_diff); })); THCudaCheck(cudaGetLastError()); return 1; }
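// Editor's note: the forward and backward kernels share one bilinear-weight
// scheme; a self-contained host rendering of that math (illustrative, mirrors
// the device code above):
void bilinear_weights(float y, float x,
                      float &w1, float &w2, float &w3, float &w4)
{
    int y_low = (int)y, x_low = (int)x;
    float ly = y - y_low, lx = x - x_low;   // fractional offsets
    float hy = 1.f - ly,  hx = 1.f - lx;
    w1 = hy * hx;   // weight of (y_low,  x_low)
    w2 = hy * lx;   // weight of (y_low,  x_high)
    w3 = ly * hx;   // weight of (y_high, x_low)
    w4 = ly * lx;   // weight of (y_high, x_high)
}
// e.g. y = 1.25, x = 2.5 gives w1 = 0.375, w2 = 0.375, w3 = 0.125,
// w4 = 0.125; the four weights always sum to 1, which is why the backward
// pass can split offset_top_diff across the same four taps.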
b70a8a1daaca9cd09c61ee1baefe24613f401b95.hip
// !!! This is a file automatically generated by hipify!!! #include <string> #include <algorithm> #include <math.h> #include <stdio.h> #include <vector> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "cudaRenderer.h" #include "image.h" #include "noise.h" #include "sceneLoader.h" #include "util.h" //////////////////////////////////////////////////////////////////////////////////////// // Putting all the cuda kernels here /////////////////////////////////////////////////////////////////////////////////////// struct GlobalConstants { SceneName sceneName; int numCircles; float* position; float* velocity; float* color; float* radius; int imageWidth; int imageHeight; float* imageData; }; // Global variable that is in scope, but read-only, for all cuda // kernels. The __constant__ modifier designates this variable will // be stored in special "constant" memory on the GPU. (we didn't talk // about this type of memory in class, but constant memory is a fast // place to put read-only variables). __constant__ GlobalConstants cuConstRendererParams; // read-only lookup tables used to quickly compute noise (needed by // advanceAnimation for the snowflake scene) __constant__ int cuConstNoiseYPermutationTable[256]; __constant__ int cuConstNoiseXPermutationTable[256]; __constant__ float cuConstNoise1DValueTable[256]; // color ramp table needed for the color ramp lookup shader #define COLOR_MAP_SIZE 5 __constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3]; // including parts of the CUDA code from external files to keep this // file simpler and to separate code that should not be modified #include "noiseCuda.cu_inl" #include "lookupColor.cu_inl" // kernelClearImageSnowflake -- (CUDA device code) // // Clear the image, setting the image to the white-gray gradation that // is used in the snowflake image __global__ void kernelClearImageSnowflake() { int imageX = blockIdx.x * blockDim.x + threadIdx.x; int imageY = blockIdx.y * blockDim.y + threadIdx.y; int width = cuConstRendererParams.imageWidth; int height = cuConstRendererParams.imageHeight; if (imageX >= width || imageY >= height) return; int offset = 4 * (imageY * width + imageX); float shade = .4f + .45f * static_cast<float>(height-imageY) / height; float4 value = make_float4(shade, shade, shade, 1.f); // write to global memory: As an optimization, I use a float4 // store, that results in more efficient code than if I coded this // up as four separate fp32 stores. *(float4*)(&cuConstRendererParams.imageData[offset]) = value; } // kernelClearImage -- (CUDA device code) // // Clear the image, setting all pixels to the specified color rgba __global__ void kernelClearImage(float r, float g, float b, float a) { int imageX = blockIdx.x * blockDim.x + threadIdx.x; int imageY = blockIdx.y * blockDim.y + threadIdx.y; int width = cuConstRendererParams.imageWidth; int height = cuConstRendererParams.imageHeight; if (imageX >= width || imageY >= height) return; int offset = 4 * (imageY * width + imageX); float4 value = make_float4(r, g, b, a); // write to global memory: As an optimization, I use a float4 // store, that results in more efficient code than if I coded this // up as four separate fp32 stores.
*(float4*)(&cuConstRendererParams.imageData[offset]) = value; } // kernelAdvanceFireWorks // // Update the position of the fireworks (if circle is firework) __global__ void kernelAdvanceFireWorks() { const float dt = 1.f / 60.f; const float pi = 3.14159; const float maxDist = 0.25f; float* velocity = cuConstRendererParams.velocity; float* position = cuConstRendererParams.position; float* radius = cuConstRendererParams.radius; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update return; } // determine the fire-work center/spark indices int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS; int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS; int index3i = 3 * fIdx; int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx; int index3j = 3 * sIdx; float cx = position[index3i]; float cy = position[index3i+1]; // update position position[index3j] += velocity[index3j] * dt; position[index3j+1] += velocity[index3j+1] * dt; // fire-work sparks float sx = position[index3j]; float sy = position[index3j+1]; // compute vector from firework-spark float cxsx = sx - cx; float cysy = sy - cy; // compute distance from fire-work float dist = sqrt(cxsx * cxsx + cysy * cysy); if (dist > maxDist) { // restore to starting position // random starting position on fire-work's rim float angle = (sfIdx * 2 * pi)/NUM_SPARKS; float sinA = sin(angle); float cosA = cos(angle); float x = cosA * radius[fIdx]; float y = sinA * radius[fIdx]; position[index3j] = position[index3i] + x; position[index3j+1] = position[index3i+1] + y; position[index3j+2] = 0.0f; // travel scaled unit length velocity[index3j] = cosA/5.0; velocity[index3j+1] = sinA/5.0; velocity[index3j+2] = 0.0f; } } // kernelAdvanceHypnosis // // Update the radius/color of the circles __global__ void kernelAdvanceHypnosis() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; float* radius = cuConstRendererParams.radius; float cutOff = 0.5f; // place circle back in center after reaching threshold radius if (radius[index] > cutOff) { radius[index] = 0.02f; } else { radius[index] += 0.01f; } } // kernelAdvanceBouncingBalls // // Update the position of the balls __global__ void kernelAdvanceBouncingBalls() { const float dt = 1.f / 60.f; const float kGravity = -2.8f; // sorry Newton const float kDragCoeff = -0.8f; const float epsilon = 0.001f; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; float* velocity = cuConstRendererParams.velocity; float* position = cuConstRendererParams.position; int index3 = 3 * index; // reverse velocity if center position < 0 float oldVelocity = velocity[index3+1]; float oldPosition = position[index3+1]; if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition return; } if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball velocity[index3+1] *= kDragCoeff; } // update velocity: v = u + at (only along y-axis) velocity[index3+1] += kGravity * dt; // update positions (only along y-axis) position[index3+1] += velocity[index3+1] * dt; if (fabsf(velocity[index3+1] - oldVelocity) < epsilon && oldPosition < 0.0f && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball velocity[index3+1] = 0.f; position[index3+1] = 0.f; } } // kernelAdvanceSnowflake -- (CUDA device code) // // move the snowflake animation forward one time step. Updates circle // positions and velocities.
Note how the position of the snowflake // is reset if it moves off the left, right, or bottom of the screen. __global__ void kernelAdvanceSnowflake() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; const float dt = 1.f / 60.f; const float kGravity = -1.8f; // sorry Newton const float kDragCoeff = 2.f; int index3 = 3 * index; float* positionPtr = &cuConstRendererParams.position[index3]; float* velocityPtr = &cuConstRendererParams.velocity[index3]; // loads from global memory float3 position = *((float3*)positionPtr); float3 velocity = *((float3*)velocityPtr); // hack to make farther circles move more slowly, giving the // illusion of parallax float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp // add some noise to the motion to make the snow flutter float3 noiseInput; noiseInput.x = 10.f * position.x; noiseInput.y = 10.f * position.y; noiseInput.z = 255.f * position.z; float2 noiseForce = cudaVec2CellNoise(noiseInput, index); noiseForce.x *= 7.5f; noiseForce.y *= 5.f; // drag float2 dragForce; dragForce.x = -1.f * kDragCoeff * velocity.x; dragForce.y = -1.f * kDragCoeff * velocity.y; // update positions position.x += velocity.x * dt; position.y += velocity.y * dt; // update velocities (each component uses its own drag term) velocity.x += forceScaling * (noiseForce.x + dragForce.x) * dt; velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt; float radius = cuConstRendererParams.radius[index]; // if the snowflake has moved off the left, right or bottom of // the screen, place it back at the top and give it a // pseudorandom x position and velocity. if ( (position.y + radius < 0.f) || (position.x + radius) < -0.f || (position.x - radius) > 1.f) { noiseInput.x = 255.f * position.x; noiseInput.y = 255.f * position.y; noiseInput.z = 255.f * position.z; noiseForce = cudaVec2CellNoise(noiseInput, index); position.x = .5f + .5f * noiseForce.x; position.y = 1.35f + radius; // restart from 0 vertical velocity. Choose a // pseudo-random horizontal velocity. velocity.x = 2.f * noiseForce.y; velocity.y = 0.f; } // store updated positions and velocities to global memory *((float3*)positionPtr) = position; *((float3*)velocityPtr) = velocity; } // shadePixel -- (CUDA device code) // // given a pixel and a circle, determines the contribution to the // pixel from the circle. Update of the image is done in this // function. Called by kernelRenderCircles() __device__ __inline__ void shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) { float diffX = p.x - pixelCenter.x; float diffY = p.y - pixelCenter.y; float pixelDist = diffX * diffX + diffY * diffY; float rad = cuConstRendererParams.radius[circleIndex]; float maxDist = rad * rad; // circle does not contribute to the image if (pixelDist > maxDist) return; float3 rgb; float alpha; // there is a non-zero contribution. Now compute the shading value // suggestion: This conditional is in the inner loop. Although it // will evaluate the same for all threads, there is overhead in // setting up the lane masks etc to implement the conditional. It // would be wise to perform this logic outside of the loop nest in // kernelRenderCircles. (If feeling good about yourself, you // could use some specialized template magic).
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) { const float kCircleMaxAlpha = .5f; const float falloffScale = 4.f; float normPixelDist = sqrt(pixelDist) / rad; rgb = lookupColor(normPixelDist); float maxAlpha = .6f + .4f * (1.f-p.z); maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist); } else { // simple: each circle has an assigned color int index3 = 3 * circleIndex; rgb = *(float3*)&(cuConstRendererParams.color[index3]); alpha = .5f; } float oneMinusAlpha = 1.f - alpha; // BEGIN SHOULD-BE-ATOMIC REGION // global memory read float4 existingColor = *imagePtr; float4 newColor; newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x; newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y; newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z; newColor.w = alpha + existingColor.w; // global memory write *imagePtr = newColor; // END SHOULD-BE-ATOMIC REGION } // kernelRenderCircles -- (CUDA device code) // // Each thread renders a circle. Since there is no protection to // ensure order of update or mutual exclusion on the output image, the // resulting image will be incorrect. __global__ void kernelRenderCircles() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; int index3 = 3 * index; // read position and radius float3 p = *(float3*)(&cuConstRendererParams.position[index3]); float rad = cuConstRendererParams.radius[index]; // compute the bounding box of the circle. The bound is in integer // screen coordinates, so it's clamped to the edges of the screen. short imageWidth = cuConstRendererParams.imageWidth; short imageHeight = cuConstRendererParams.imageHeight; short minX = static_cast<short>(imageWidth * (p.x - rad)); short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1; short minY = static_cast<short>(imageHeight * (p.y - rad)); short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1; // a bunch of clamps. Is there a CUDA built-in for this? short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0; short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0; short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0; short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? 
maxY : imageHeight) : 0; float invWidth = 1.f / imageWidth; float invHeight = 1.f / imageHeight; // for all pixels in the bounding box for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) { float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]); for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) { float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f), invHeight * (static_cast<float>(pixelY) + 0.5f)); shadePixel(index, pixelCenterNorm, p, imgPtr); imgPtr++; } } } //////////////////////////////////////////////////////////////////////////////////////// CudaRenderer::CudaRenderer() { image = NULL; numCircles = 0; position = NULL; velocity = NULL; color = NULL; radius = NULL; cudaDevicePosition = NULL; cudaDeviceVelocity = NULL; cudaDeviceColor = NULL; cudaDeviceRadius = NULL; cudaDeviceImageData = NULL; } CudaRenderer::~CudaRenderer() { if (image) { delete image; } if (position) { delete [] position; delete [] velocity; delete [] color; delete [] radius; } if (cudaDevicePosition) { hipFree(cudaDevicePosition); hipFree(cudaDeviceVelocity); hipFree(cudaDeviceColor); hipFree(cudaDeviceRadius); hipFree(cudaDeviceImageData); } } const Image* CudaRenderer::getImage() { // need to copy contents of the rendered image from device memory // before we expose the Image object to the caller printf("Copying image data from device\n"); hipMemcpy(image->data, cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height, hipMemcpyDeviceToHost); return image; } void CudaRenderer::loadScene(SceneName scene) { sceneName = scene; loadCircleScene(sceneName, numCircles, position, velocity, color, radius); } void CudaRenderer::setup() { int deviceCount = 0; std::string name; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CudaRenderer\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); // By this time the scene should be loaded. Now copy all the key // data structures into device memory so they are accessible to // CUDA kernels // // See the CUDA Programmer's Guide for descriptions of // hipMalloc and hipMemcpy hipMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles); hipMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles); hipMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles); hipMalloc(&cudaDeviceRadius, sizeof(float) * numCircles); hipMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height); hipMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice); hipMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice); hipMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, hipMemcpyHostToDevice); hipMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, hipMemcpyHostToDevice); // Initialize parameters in constant memory.
We didn't talk about // constant memory in class, but the use of read-only constant // memory here is an optimization over just sticking these values // in device global memory. NVIDIA GPUs have a few special tricks // for optimizing access to constant memory. Using global memory // here would have worked just as well. See the Programmer's // Guide for more information about constant memory. GlobalConstants params; params.sceneName = sceneName; params.numCircles = numCircles; params.imageWidth = image->width; params.imageHeight = image->height; params.position = cudaDevicePosition; params.velocity = cudaDeviceVelocity; params.color = cudaDeviceColor; params.radius = cudaDeviceRadius; params.imageData = cudaDeviceImageData; hipMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants)); // also need to copy over the noise lookup tables, so we can // implement noise on the GPU int* permX; int* permY; float* value1D; getNoiseTables(&permX, &permY, &value1D); hipMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256); hipMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256); hipMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256); // last, copy over the color table that's used by the shading // function for circles in the snowflake demo float lookupTable[COLOR_MAP_SIZE][3] = { {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {.8f, .9f, 1.f}, {.8f, .9f, 1.f}, {.8f, 0.8f, 1.f}, }; hipMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE); } // allocOutputImage -- // // Allocate buffer the renderer will render into. Check status of // image first to avoid memory leak. void CudaRenderer::allocOutputImage(int width, int height) { if (image) delete image; image = new Image(width, height); } // clearImage -- // // Clears the renderer's target image. The state of the image after // the clear depends on the scene being rendered. void CudaRenderer::clearImage() { // 256 threads per block is a healthy number dim3 blockDim(16, 16, 1); dim3 gridDim( (image->width + blockDim.x - 1) / blockDim.x, (image->height + blockDim.y - 1) / blockDim.y); if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) { hipLaunchKernelGGL(( kernelClearImageSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, ); } else { hipLaunchKernelGGL(( kernelClearImage), dim3(gridDim), dim3(blockDim), 0, 0, 1.f, 1.f, 1.f, 1.f); } hipDeviceSynchronize(); } // advanceAnimation -- // // Advance the simulation one time step. Updates all circle positions // and velocities void CudaRenderer::advanceAnimation() { // 256 threads per block is a healthy number dim3 blockDim(256, 1); dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x); // only the snowflake scene has animation if (sceneName == SNOWFLAKES) { hipLaunchKernelGGL(( kernelAdvanceSnowflake), dim3(gridDim), dim3(blockDim), 0, 0, ); } else if (sceneName == BOUNCING_BALLS) { hipLaunchKernelGGL(( kernelAdvanceBouncingBalls), dim3(gridDim), dim3(blockDim), 0, 0, ); } else if (sceneName == HYPNOSIS) { hipLaunchKernelGGL(( kernelAdvanceHypnosis), dim3(gridDim), dim3(blockDim), 0, 0, ); } else if (sceneName == FIREWORKS) { hipLaunchKernelGGL(( kernelAdvanceFireWorks), dim3(gridDim), dim3(blockDim), 0, 0, ); } hipDeviceSynchronize(); } void CudaRenderer::render() { // 256 threads per block is a healthy number dim3 blockDim(256, 1); dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x); hipLaunchKernelGGL(( kernelRenderCircles), dim3(gridDim), dim3(blockDim), 0, 0, ); hipDeviceSynchronize(); }
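// Editor's note: the "is there a CUDA built-in for this?" aside in
// kernelRenderCircles can be answered with the device min/max integer
// overloads. A sketch of a helper that is behavior-equivalent to the four
// ternary clamps above (helper name is illustrative):
__device__ inline short clampToScreen(short v, short limit)
{
    return (short)min(max((int)v, 0), (int)limit);
}
// e.g. screenMinX = clampToScreen(minX, imageWidth), and likewise for the
// other three bounds.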
b70a8a1daaca9cd09c61ee1baefe24613f401b95.cu
#include <string> #include <algorithm> #include <math.h> #include <stdio.h> #include <vector> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "cudaRenderer.h" #include "image.h" #include "noise.h" #include "sceneLoader.h" #include "util.h" //////////////////////////////////////////////////////////////////////////////////////// // Putting all the cuda kernels here /////////////////////////////////////////////////////////////////////////////////////// struct GlobalConstants { SceneName sceneName; int numCircles; float* position; float* velocity; float* color; float* radius; int imageWidth; int imageHeight; float* imageData; }; // Global variable that is in scope, but read-only, for all cuda // kernels. The __constant__ modifier designates this variable will // be stored in special "constant" memory on the GPU. (we didn't talk // about this type of memory in class, but constant memory is a fast // place to put read-only variables). __constant__ GlobalConstants cuConstRendererParams; // read-only lookup tables used to quickly compute noise (needed by // advanceAnimation for the snowflake scene) __constant__ int cuConstNoiseYPermutationTable[256]; __constant__ int cuConstNoiseXPermutationTable[256]; __constant__ float cuConstNoise1DValueTable[256]; // color ramp table needed for the color ramp lookup shader #define COLOR_MAP_SIZE 5 __constant__ float cuConstColorRamp[COLOR_MAP_SIZE][3]; // including parts of the CUDA code from external files to keep this // file simpler and to seperate code that should not be modified #include "noiseCuda.cu_inl" #include "lookupColor.cu_inl" // kernelClearImageSnowflake -- (CUDA device code) // // Clear the image, setting the image to the white-gray gradation that // is used in the snowflake image __global__ void kernelClearImageSnowflake() { int imageX = blockIdx.x * blockDim.x + threadIdx.x; int imageY = blockIdx.y * blockDim.y + threadIdx.y; int width = cuConstRendererParams.imageWidth; int height = cuConstRendererParams.imageHeight; if (imageX >= width || imageY >= height) return; int offset = 4 * (imageY * width + imageX); float shade = .4f + .45f * static_cast<float>(height-imageY) / height; float4 value = make_float4(shade, shade, shade, 1.f); // write to global memory: As an optimization, I use a float4 // store, that results in more efficient code than if I coded this // up as four seperate fp32 stores. *(float4*)(&cuConstRendererParams.imageData[offset]) = value; } // kernelClearImage -- (CUDA device code) // // Clear the image, setting all pixels to the specified color rgba __global__ void kernelClearImage(float r, float g, float b, float a) { int imageX = blockIdx.x * blockDim.x + threadIdx.x; int imageY = blockIdx.y * blockDim.y + threadIdx.y; int width = cuConstRendererParams.imageWidth; int height = cuConstRendererParams.imageHeight; if (imageX >= width || imageY >= height) return; int offset = 4 * (imageY * width + imageX); float4 value = make_float4(r, g, b, a); // write to global memory: As an optimization, I use a float4 // store, that results in more efficient code than if I coded this // up as four seperate fp32 stores. 
*(float4*)(&cuConstRendererParams.imageData[offset]) = value; } // kernelAdvanceFireWorks // // Update the position of the fireworks (if circle is firework) __global__ void kernelAdvanceFireWorks() { const float dt = 1.f / 60.f; const float pi = 3.14159; const float maxDist = 0.25f; float* velocity = cuConstRendererParams.velocity; float* position = cuConstRendererParams.position; float* radius = cuConstRendererParams.radius; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; if (0 <= index && index < NUM_FIREWORKS) { // firework center; no update return; } // determine the fire-work center/spark indices int fIdx = (index - NUM_FIREWORKS) / NUM_SPARKS; int sfIdx = (index - NUM_FIREWORKS) % NUM_SPARKS; int index3i = 3 * fIdx; int sIdx = NUM_FIREWORKS + fIdx * NUM_SPARKS + sfIdx; int index3j = 3 * sIdx; float cx = position[index3i]; float cy = position[index3i+1]; // update position position[index3j] += velocity[index3j] * dt; position[index3j+1] += velocity[index3j+1] * dt; // fire-work sparks float sx = position[index3j]; float sy = position[index3j+1]; // compute vector from firework-spark float cxsx = sx - cx; float cysy = sy - cy; // compute distance from fire-work float dist = sqrt(cxsx * cxsx + cysy * cysy); if (dist > maxDist) { // restore to starting position // random starting position on fire-work's rim float angle = (sfIdx * 2 * pi)/NUM_SPARKS; float sinA = sin(angle); float cosA = cos(angle); float x = cosA * radius[fIdx]; float y = sinA * radius[fIdx]; position[index3j] = position[index3i] + x; position[index3j+1] = position[index3i+1] + y; position[index3j+2] = 0.0f; // travel scaled unit length velocity[index3j] = cosA/5.0; velocity[index3j+1] = sinA/5.0; velocity[index3j+2] = 0.0f; } } // kernelAdvanceHypnosis // // Update the radius/color of the circles __global__ void kernelAdvanceHypnosis() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; float* radius = cuConstRendererParams.radius; float cutOff = 0.5f; // place circle back in center after reaching threshold radisus if (radius[index] > cutOff) { radius[index] = 0.02f; } else { radius[index] += 0.01f; } } // kernelAdvanceBouncingBalls // // Update the positino of the balls __global__ void kernelAdvanceBouncingBalls() { const float dt = 1.f / 60.f; const float kGravity = -2.8f; // sorry Newton const float kDragCoeff = -0.8f; const float epsilon = 0.001f; int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; float* velocity = cuConstRendererParams.velocity; float* position = cuConstRendererParams.position; int index3 = 3 * index; // reverse velocity if center position < 0 float oldVelocity = velocity[index3+1]; float oldPosition = position[index3+1]; if (oldVelocity == 0.f && oldPosition == 0.f) { // stop-condition return; } if (position[index3+1] < 0 && oldVelocity < 0.f) { // bounce ball velocity[index3+1] *= kDragCoeff; } // update velocity: v = u + at (only along y-axis) velocity[index3+1] += kGravity * dt; // update positions (only along y-axis) position[index3+1] += velocity[index3+1] * dt; if (fabsf(velocity[index3+1] - oldVelocity) < epsilon && oldPosition < 0.0f && fabsf(position[index3+1]-oldPosition) < epsilon) { // stop ball velocity[index3+1] = 0.f; position[index3+1] = 0.f; } } // kernelAdvanceSnowflake -- (CUDA device code) // // move the snowflake animation forward one time step. Updates circle // positions and velocities. 
Note how the position of the snowflake // is reset if it moves off the left, right, or bottom of the screen. __global__ void kernelAdvanceSnowflake() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; const float dt = 1.f / 60.f; const float kGravity = -1.8f; // sorry Newton const float kDragCoeff = 2.f; int index3 = 3 * index; float* positionPtr = &cuConstRendererParams.position[index3]; float* velocityPtr = &cuConstRendererParams.velocity[index3]; // loads from global memory float3 position = *((float3*)positionPtr); float3 velocity = *((float3*)velocityPtr); // hack to make farther circles move more slowly, giving the // illusion of parallax float forceScaling = fmin(fmax(1.f - position.z, .1f), 1.f); // clamp // add some noise to the motion to make the snow flutter float3 noiseInput; noiseInput.x = 10.f * position.x; noiseInput.y = 10.f * position.y; noiseInput.z = 255.f * position.z; float2 noiseForce = cudaVec2CellNoise(noiseInput, index); noiseForce.x *= 7.5f; noiseForce.y *= 5.f; // drag float2 dragForce; dragForce.x = -1.f * kDragCoeff * velocity.x; dragForce.y = -1.f * kDragCoeff * velocity.y; // update positions position.x += velocity.x * dt; position.y += velocity.y * dt; // update velocities velocity.x += forceScaling * (noiseForce.x + dragForce.y) * dt; velocity.y += forceScaling * (kGravity + noiseForce.y + dragForce.y) * dt; float radius = cuConstRendererParams.radius[index]; // if the snowflake has moved off the left, right or bottom of // the screen, place it back at the top and give it a // pseudorandom x position and velocity. if ( (position.y + radius < 0.f) || (position.x + radius) < -0.f || (position.x - radius) > 1.f) { noiseInput.x = 255.f * position.x; noiseInput.y = 255.f * position.y; noiseInput.z = 255.f * position.z; noiseForce = cudaVec2CellNoise(noiseInput, index); position.x = .5f + .5f * noiseForce.x; position.y = 1.35f + radius; // restart from 0 vertical velocity. Choose a // pseudo-random horizontal velocity. velocity.x = 2.f * noiseForce.y; velocity.y = 0.f; } // store updated positions and velocities to global memory *((float3*)positionPtr) = position; *((float3*)velocityPtr) = velocity; } // shadePixel -- (CUDA device code) // // given a pixel and a circle, determines the contribution to the // pixel from the circle. Update of the image is done in this // function. Called by kernelRenderCircles() __device__ __inline__ void shadePixel(int circleIndex, float2 pixelCenter, float3 p, float4* imagePtr) { float diffX = p.x - pixelCenter.x; float diffY = p.y - pixelCenter.y; float pixelDist = diffX * diffX + diffY * diffY; float rad = cuConstRendererParams.radius[circleIndex];; float maxDist = rad * rad; // circle does not contribute to the image if (pixelDist > maxDist) return; float3 rgb; float alpha; // there is a non-zero contribution. Now compute the shading value // suggestion: This conditional is in the inner loop. Although it // will evaluate the same for all threads, there is overhead in // setting up the lane masks etc to implement the conditional. It // would be wise to perform this logic outside of the loop next in // kernelRenderCircles. (If feeling good about yourself, you // could use some specialized template magic). 
if (cuConstRendererParams.sceneName == SNOWFLAKES || cuConstRendererParams.sceneName == SNOWFLAKES_SINGLE_FRAME) { const float kCircleMaxAlpha = .5f; const float falloffScale = 4.f; float normPixelDist = sqrt(pixelDist) / rad; rgb = lookupColor(normPixelDist); float maxAlpha = .6f + .4f * (1.f-p.z); maxAlpha = kCircleMaxAlpha * fmaxf(fminf(maxAlpha, 1.f), 0.f); // kCircleMaxAlpha * clamped value alpha = maxAlpha * exp(-1.f * falloffScale * normPixelDist * normPixelDist); } else { // simple: each circle has an assigned color int index3 = 3 * circleIndex; rgb = *(float3*)&(cuConstRendererParams.color[index3]); alpha = .5f; } float oneMinusAlpha = 1.f - alpha; // BEGIN SHOULD-BE-ATOMIC REGION // global memory read float4 existingColor = *imagePtr; float4 newColor; newColor.x = alpha * rgb.x + oneMinusAlpha * existingColor.x; newColor.y = alpha * rgb.y + oneMinusAlpha * existingColor.y; newColor.z = alpha * rgb.z + oneMinusAlpha * existingColor.z; newColor.w = alpha + existingColor.w; // global memory write *imagePtr = newColor; // END SHOULD-BE-ATOMIC REGION } // kernelRenderCircles -- (CUDA device code) // // Each thread renders a circle. Since there is no protection to // ensure order of update or mutual exclusion on the output image, the // resulting image will be incorrect. __global__ void kernelRenderCircles() { int index = blockIdx.x * blockDim.x + threadIdx.x; if (index >= cuConstRendererParams.numCircles) return; int index3 = 3 * index; // read position and radius float3 p = *(float3*)(&cuConstRendererParams.position[index3]); float rad = cuConstRendererParams.radius[index]; // compute the bounding box of the circle. The bound is in integer // screen coordinates, so it's clamped to the edges of the screen. short imageWidth = cuConstRendererParams.imageWidth; short imageHeight = cuConstRendererParams.imageHeight; short minX = static_cast<short>(imageWidth * (p.x - rad)); short maxX = static_cast<short>(imageWidth * (p.x + rad)) + 1; short minY = static_cast<short>(imageHeight * (p.y - rad)); short maxY = static_cast<short>(imageHeight * (p.y + rad)) + 1; // a bunch of clamps. Is there a CUDA built-in for this? short screenMinX = (minX > 0) ? ((minX < imageWidth) ? minX : imageWidth) : 0; short screenMaxX = (maxX > 0) ? ((maxX < imageWidth) ? maxX : imageWidth) : 0; short screenMinY = (minY > 0) ? ((minY < imageHeight) ? minY : imageHeight) : 0; short screenMaxY = (maxY > 0) ? ((maxY < imageHeight) ? 
maxY : imageHeight) : 0; float invWidth = 1.f / imageWidth; float invHeight = 1.f / imageHeight; // for all pixels in the bonding box for (int pixelY=screenMinY; pixelY<screenMaxY; pixelY++) { float4* imgPtr = (float4*)(&cuConstRendererParams.imageData[4 * (pixelY * imageWidth + screenMinX)]); for (int pixelX=screenMinX; pixelX<screenMaxX; pixelX++) { float2 pixelCenterNorm = make_float2(invWidth * (static_cast<float>(pixelX) + 0.5f), invHeight * (static_cast<float>(pixelY) + 0.5f)); shadePixel(index, pixelCenterNorm, p, imgPtr); imgPtr++; } } } //////////////////////////////////////////////////////////////////////////////////////// CudaRenderer::CudaRenderer() { image = NULL; numCircles = 0; position = NULL; velocity = NULL; color = NULL; radius = NULL; cudaDevicePosition = NULL; cudaDeviceVelocity = NULL; cudaDeviceColor = NULL; cudaDeviceRadius = NULL; cudaDeviceImageData = NULL; } CudaRenderer::~CudaRenderer() { if (image) { delete image; } if (position) { delete [] position; delete [] velocity; delete [] color; delete [] radius; } if (cudaDevicePosition) { cudaFree(cudaDevicePosition); cudaFree(cudaDeviceVelocity); cudaFree(cudaDeviceColor); cudaFree(cudaDeviceRadius); cudaFree(cudaDeviceImageData); } } const Image* CudaRenderer::getImage() { // need to copy contents of the rendered image from device memory // before we expose the Image object to the caller printf("Copying image data from device\n"); cudaMemcpy(image->data, cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height, cudaMemcpyDeviceToHost); return image; } void CudaRenderer::loadScene(SceneName scene) { sceneName = scene; loadCircleScene(sceneName, numCircles, position, velocity, color, radius); } void CudaRenderer::setup() { int deviceCount = 0; std::string name; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CudaRenderer\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } printf("---------------------------------------------------------\n"); // By this time the scene should be loaded. Now copy all the key // data structures into device memory so they are accessible to // CUDA kernels // // See the CUDA Programmer's Guide for descriptions of // cudaMalloc and cudaMemcpy cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles); cudaMalloc(&cudaDeviceVelocity, sizeof(float) * 3 * numCircles); cudaMalloc(&cudaDeviceColor, sizeof(float) * 3 * numCircles); cudaMalloc(&cudaDeviceRadius, sizeof(float) * numCircles); cudaMalloc(&cudaDeviceImageData, sizeof(float) * 4 * image->width * image->height); cudaMemcpy(cudaDevicePosition, position, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice); cudaMemcpy(cudaDeviceVelocity, velocity, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice); cudaMemcpy(cudaDeviceColor, color, sizeof(float) * 3 * numCircles, cudaMemcpyHostToDevice); cudaMemcpy(cudaDeviceRadius, radius, sizeof(float) * numCircles, cudaMemcpyHostToDevice); // Initialize parameters in constant memory. 
We didn't talk about // constant memory in class, but the use of read-only constant // memory here is an optimization over just sticking these values // in device global memory. NVIDIA GPUs have a few special tricks // for optimizing access to constant memory. Using global memory // here would have worked just as well. See the Programmer's // Guide for more information about constant memory. GlobalConstants params; params.sceneName = sceneName; params.numCircles = numCircles; params.imageWidth = image->width; params.imageHeight = image->height; params.position = cudaDevicePosition; params.velocity = cudaDeviceVelocity; params.color = cudaDeviceColor; params.radius = cudaDeviceRadius; params.imageData = cudaDeviceImageData; cudaMemcpyToSymbol(cuConstRendererParams, &params, sizeof(GlobalConstants)); // also need to copy over the noise lookup tables, so we can // implement noise on the GPU int* permX; int* permY; float* value1D; getNoiseTables(&permX, &permY, &value1D); cudaMemcpyToSymbol(cuConstNoiseXPermutationTable, permX, sizeof(int) * 256); cudaMemcpyToSymbol(cuConstNoiseYPermutationTable, permY, sizeof(int) * 256); cudaMemcpyToSymbol(cuConstNoise1DValueTable, value1D, sizeof(float) * 256); // last, copy over the color table that's used by the shading // function for circles in the snowflake demo float lookupTable[COLOR_MAP_SIZE][3] = { {1.f, 1.f, 1.f}, {1.f, 1.f, 1.f}, {.8f, .9f, 1.f}, {.8f, .9f, 1.f}, {.8f, 0.8f, 1.f}, }; cudaMemcpyToSymbol(cuConstColorRamp, lookupTable, sizeof(float) * 3 * COLOR_MAP_SIZE); } // allocOutputImage -- // // Allocate buffer the renderer will render into. Check status of // image first to avoid memory leak. void CudaRenderer::allocOutputImage(int width, int height) { if (image) delete image; image = new Image(width, height); } // clearImage -- // // Clear's the renderer's target image. The state of the image after // the clear depends on the scene being rendered. void CudaRenderer::clearImage() { // 256 threads per block is a healthy number dim3 blockDim(16, 16, 1); dim3 gridDim( (image->width + blockDim.x - 1) / blockDim.x, (image->height + blockDim.y - 1) / blockDim.y); if (sceneName == SNOWFLAKES || sceneName == SNOWFLAKES_SINGLE_FRAME) { kernelClearImageSnowflake<<<gridDim, blockDim>>>(); } else { kernelClearImage<<<gridDim, blockDim>>>(1.f, 1.f, 1.f, 1.f); } cudaDeviceSynchronize(); } // advanceAnimation -- // // Advance the simulation one time step. Updates all circle positions // and velocities void CudaRenderer::advanceAnimation() { // 256 threads per block is a healthy number dim3 blockDim(256, 1); dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x); // only the snowflake scene has animation if (sceneName == SNOWFLAKES) { kernelAdvanceSnowflake<<<gridDim, blockDim>>>(); } else if (sceneName == BOUNCING_BALLS) { kernelAdvanceBouncingBalls<<<gridDim, blockDim>>>(); } else if (sceneName == HYPNOSIS) { kernelAdvanceHypnosis<<<gridDim, blockDim>>>(); } else if (sceneName == FIREWORKS) { kernelAdvanceFireWorks<<<gridDim, blockDim>>>(); } cudaDeviceSynchronize(); } void CudaRenderer::render() { // 256 threads per block is a healthy number dim3 blockDim(256, 1); dim3 gridDim((numCircles + blockDim.x - 1) / blockDim.x); kernelRenderCircles<<<gridDim, blockDim>>>(); cudaDeviceSynchronize(); }
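// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the file above.] setup() stores the status
// of cudaGetDeviceCount() in `err` but never inspects it, and none of the
// cudaMalloc/cudaMemcpy calls are checked. The G4HepEm files later in this
// document wrap every runtime call in a gpuErrchk macro; a minimal
// self-contained sketch of that pattern (the macro name CUDA_CHECK is an
// assumption, not part of this code base):
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t status_ = (call);                                     \
        if (status_ != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error '%s' at %s:%d\n",                 \
                    cudaGetErrorString(status_), __FILE__, __LINE__);     \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

// Usage, e.g. inside setup():
//   CUDA_CHECK(cudaMalloc(&cudaDevicePosition, sizeof(float) * 3 * numCircles));
// ---------------------------------------------------------------------------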
b145e20fbe52ab4f461da90ab6bb5a9b6e889054.hip
// !!! This is a file automatically generated by hipify!!!
#include <catboost/cuda/cuda_util/kernel/sort_templ.cuh>

namespace NKernel {
    template hipError_t RadixSort(ui32* keys, uchar* values, ui32 size, TRadixSortContext& context, TCudaStream stream);
}
b145e20fbe52ab4f461da90ab6bb5a9b6e889054.cu
#include <catboost/cuda/cuda_util/kernel/sort_templ.cuh>

namespace NKernel {
    template cudaError_t RadixSort(ui32* keys, uchar* values, ui32 size, TRadixSortContext& context, TCudaStream stream);
}
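// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the pair above.] The two one-line files
// above are explicit template instantiations: each forces code generation
// for RadixSort with keys=ui32, values=uchar in that translation unit
// (the template body evidently lives in the included sort_templ.cuh), so
// other files can link against it without seeing the definition. A
// self-contained illustration of the same pattern; all names below are
// hypothetical, not the CatBoost API:
#include <cstdio>

template <typename T>
__global__ void FillKernel(T* data, T value, unsigned size) {
    unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        data[i] = value;
    }
}

// Template wrapper, analogous to RadixSort in sort_templ.cuh.
template <typename T>
cudaError_t Fill(T* data, T value, unsigned size) {
    FillKernel<T><<<(size + 255u) / 256u, 256u>>>(data, value, size);
    return cudaGetLastError();
}

// The explicit instantiation itself: emits host and device code for
// T = unsigned char here, mirroring `template cudaError_t RadixSort(...)`.
template cudaError_t Fill<unsigned char>(unsigned char*, unsigned char, unsigned);

int main() {
    unsigned char* d = nullptr;
    cudaMalloc(&d, 16);
    cudaError_t err = Fill<unsigned char>(d, 7, 16u);
    cudaDeviceSynchronize();
    printf("Fill status: %s\n", cudaGetErrorString(err));
    cudaFree(d);
    return 0;
}
// ---------------------------------------------------------------------------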
f9d1bd02cf0f0a29b08d0f1e18ed0d6325d03e46.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THHUNN.h" #include "THHDeviceTensor.cuh" #include "THHDeviceTensorUtils.cuh" #include "THHDeviceUtils.cuh" #include "THHReduceApplyUtils.cuh" #include "utils.h" #include "common.h" __global__ void SpatialUnskew_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.getSize(2) * output.getSize(3)) { return; } int outputPointX = outputPointId % output.getSize(3); int outputPointY = outputPointId / output.getSize(3); int offset = outputPointY; int inputPointX = outputPointX + offset; int inputPointY = outputPointY; float valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } static int extracunn_SpatialUnskew_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = THCudaTensor_nDimension(state, input); THArgCheck(numInputDims == 3 || numInputDims == 4, 2, "input must be 3 or 4-dimensional"); if (numInputDims == 4) { numBatch = THCudaTensor_size(state, input, 0); planeDim++; dimh++; dimw++; } int numPlanes = THCudaTensor_size(state, input, planeDim); int inputH = THCudaTensor_size(state, input, dimh); int inputW = THCudaTensor_size(state, input, dimw); int outputH = inputH; int outputW = inputW - inputH + 1; THCDeviceTensor<float, 4> devInput; THCDeviceTensor<float, 4> devOutput; if (numInputDims == 3) { THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW); devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>(); devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>(); } else { THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW); devInput = toDeviceTensor<float, 4>(state, input); devOutput = toDeviceTensor<float, 4>(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.getSize(1), devOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); hipLaunchKernelGGL(( SpatialUnskew_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state), devInput, devOutput); return 1; } __global__ void SpatialUnskew_updateGradInput( THCDeviceTensor<float, 4> gradInput, THCDeviceTensor<float, 4> gradOutput) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) { return; } int outputPointX = outputPointId % gradOutput.getSize(3); int outputPointY = outputPointId / gradOutput.getSize(3); int offset = outputPointY; int inputPointX = outputPointX + offset; int inputPointY = outputPointY; float valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; //atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); gradInput[batch][plane][inputPointY][inputPointX] = valueToCopy; } static int extracunn_SpatialUnskew_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); // Inputs THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = THCudaTensor_nDimension(state, input); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); THCDeviceTensor<float, 4> devGradInput; THCDeviceTensor<float, 4> devGradOutput; if (numInputDims == 3) { devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>(); devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>(); } else { devGradInput = toDeviceTensor<float, 4>(state, gradInput); devGradOutput = toDeviceTensor<float, 4>(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.getSize(1), devGradOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); hipLaunchKernelGGL(( SpatialUnskew_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state), devGradInput, devGradOutput); return 1; } static const struct luaL_Reg extracunn_SpatialUnskew__ [] = { {"SpatialUnskew_updateOutput", extracunn_SpatialUnskew_updateOutput}, {"SpatialUnskew_updateGradInput", extracunn_SpatialUnskew_updateGradInput}, {NULL, NULL} }; void extracunn_SpatialUnskew_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, extracunn_SpatialUnskew__, "nn"); lua_pop(L,1); }
f9d1bd02cf0f0a29b08d0f1e18ed0d6325d03e46.cu
#include "THCUNN.h" #include "THCDeviceTensor.cuh" #include "THCDeviceTensorUtils.cuh" #include "THCDeviceUtils.cuh" #include "THCReduceApplyUtils.cuh" #include "utils.h" #include "common.h" __global__ void SpatialUnskew_updateOutput( THCDeviceTensor<float, 4> input, THCDeviceTensor<float, 4> output) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= output.getSize(2) * output.getSize(3)) { return; } int outputPointX = outputPointId % output.getSize(3); int outputPointY = outputPointId / output.getSize(3); int offset = outputPointY; int inputPointX = outputPointX + offset; int inputPointY = outputPointY; float valueToCopy = input[batch][plane][inputPointY][inputPointX]; output[batch][plane][outputPointY][outputPointX] = valueToCopy; } static int extracunn_SpatialUnskew_updateOutput(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_getfieldcheckudata(L, 1, "output", "torch.CudaTensor"); int planeDim = 0; int dimh = 1; int dimw = 2; int numBatch = 1; int numInputDims = THCudaTensor_nDimension(state, input); THArgCheck(numInputDims == 3 || numInputDims == 4, 2, "input must be 3 or 4-dimensional"); if (numInputDims == 4) { numBatch = THCudaTensor_size(state, input, 0); planeDim++; dimh++; dimw++; } int numPlanes = THCudaTensor_size(state, input, planeDim); int inputH = THCudaTensor_size(state, input, dimh); int inputW = THCudaTensor_size(state, input, dimw); int outputH = inputH; int outputW = inputW - inputH + 1; THCDeviceTensor<float, 4> devInput; THCDeviceTensor<float, 4> devOutput; if (numInputDims == 3) { THCudaTensor_resize3d(state, output, numPlanes, outputH, outputW); devInput = toDeviceTensor<float, 3>(state, input).upcastOuter<4>(); devOutput = toDeviceTensor<float, 3>(state, output).upcastOuter<4>(); } else { THCudaTensor_resize4d(state, output, numBatch, numPlanes, outputH, outputW); devInput = toDeviceTensor<float, 4>(state, input); devOutput = toDeviceTensor<float, 4>(state, output); } int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devOutput.getSize(1), devOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 
256 : outputPlaneSize); SpatialUnskew_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>( devInput, devOutput); return 1; } __global__ void SpatialUnskew_updateGradInput( THCDeviceTensor<float, 4> gradInput, THCDeviceTensor<float, 4> gradOutput) { int outputPointId = threadIdx.x + blockIdx.x * blockDim.x; int plane = blockIdx.y; int batch = blockIdx.z; if (outputPointId >= gradOutput.getSize(2) * gradOutput.getSize(3)) { return; } int outputPointX = outputPointId % gradOutput.getSize(3); int outputPointY = outputPointId / gradOutput.getSize(3); int offset = outputPointY; int inputPointX = outputPointX + offset; int inputPointY = outputPointY; float valueToCopy = gradOutput[batch][plane][outputPointY][outputPointX]; //atomicAdd(&gradInput[batch][plane][inputPointY][inputPointX], valueToCopy); gradInput[batch][plane][inputPointY][inputPointX] = valueToCopy; } static int extracunn_SpatialUnskew_updateGradInput(lua_State *L) { THCState *state = getCutorchState(L); // Inputs THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *gradOutput = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *gradInput = (THCudaTensor *)luaT_getfieldcheckudata(L, 1, "gradInput", "torch.CudaTensor"); int planeDim = 0; int dimh = 1; int dimw = 2; int numInputDims = THCudaTensor_nDimension(state, input); if (numInputDims == 4) { planeDim++; dimh++; dimw++; } THCudaTensor_resizeAs(state, gradInput, input); THCudaTensor_zero(state, gradInput); THCDeviceTensor<float, 4> devGradInput; THCDeviceTensor<float, 4> devGradOutput; if (numInputDims == 3) { devGradInput = toDeviceTensor<float, 3>(state, gradInput).upcastOuter<4>(); devGradOutput = toDeviceTensor<float, 3>(state, gradOutput).upcastOuter<4>(); } else { devGradInput = toDeviceTensor<float, 4>(state, gradInput); devGradOutput = toDeviceTensor<float, 4>(state, gradOutput); } int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3); dim3 gridSize(THCCeilDiv(outputPlaneSize, 256), devGradOutput.getSize(1), devGradOutput.getSize(0)); dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize); SpatialUnskew_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>( devGradInput, devGradOutput); return 1; } static const struct luaL_Reg extracunn_SpatialUnskew__ [] = { {"SpatialUnskew_updateOutput", extracunn_SpatialUnskew_updateOutput}, {"SpatialUnskew_updateGradInput", extracunn_SpatialUnskew_updateGradInput}, {NULL, NULL} }; void extracunn_SpatialUnskew_init(lua_State *L) { luaT_pushmetatable(L, "torch.CudaTensor"); luaT_registeratname(L, extracunn_SpatialUnskew__, "nn"); lua_pop(L,1); }
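// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the pair above.] The index mapping in
// SpatialUnskew_updateOutput is: output row y is input row y shifted left by
// y columns (offset = outputPointY), so a skewed layout of width
// inputW = outputW + inputH - 1 collapses to a dense outputW-wide map,
// matching `outputW = inputW - inputH + 1` in the host code. A host-side
// reference of that mapping (hypothetical helper, for clarity only):
#include <cstdio>

static void spatialUnskewReference(const float* in, float* out,
                                   int height, int inputW) {
    int outputW = inputW - height + 1;
    for (int y = 0; y < height; ++y)
        for (int x = 0; x < outputW; ++x)
            out[y * outputW + x] = in[y * inputW + (x + y)];  // offset = y
}

int main() {
    // 2x4 skewed input -> 2x3 unskewed output
    float in[8] = {0, 1, 2, 3,
                   4, 5, 6, 7};
    float out[6];
    spatialUnskewReference(in, out, 2, 4);
    for (int i = 0; i < 6; ++i) printf("%g ", out[i]);  // prints: 0 1 2 5 6 7
    printf("\n");
    return 0;
}
// ---------------------------------------------------------------------------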
ed3bc4e9440bf081df7eae804c18fd5313273e7f.hip
// !!! This is a file automatically generated by hipify!!! #include "G4HepEmElectronData.hh" #include <iostream> #include <hip/hip_runtime.h> #include "G4HepEmCuUtils.hh" void CopyElectronDataToDevice(struct G4HepEmElectronData* onHOST, struct G4HepEmElectronDataOnDevice** onDEVICE) { if ( !onHOST ) return; // clean away previous (if any) if ( *onDEVICE ) { FreeElectronDataOnDevice ( onDEVICE ); } // Create a G4HepEmElectronDataOnDevice structure to store pointers to _d // side arrays on the _h side. struct G4HepEmElectronDataOnDevice* elDataHTo_d = new G4HepEmElectronDataOnDevice; elDataHTo_d->fNumMatCuts = onHOST->fNumMatCuts; int numHepEmMatCuts = elDataHTo_d->fNumMatCuts; // // === ELoss data: // // set non-pointer members of the host side strcuture elDataHTo_d->fELossEnergyGridSize = onHOST->fELossEnergyGridSize; elDataHTo_d->fELossLogMinEkin = onHOST->fELossLogMinEkin; elDataHTo_d->fELossEILDelta = onHOST->fELossEILDelta; // allocate memory on _d for the ELoss energy grid and copy form _h int numELossData = onHOST->fELossEnergyGridSize; gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossEnergyGrid), sizeof( double ) *numELossData ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossEnergyGrid, onHOST->fELossEnergyGrid, sizeof( double ) *numELossData, hipMemcpyHostToDevice ) ); // // allocate data on the host for the re-aranged ELossData arrays double* elDataRange_h = new double[numELossData*numHepEmMatCuts]; double* elDataRangeSD_h = new double[numELossData*numHepEmMatCuts]; double* elDataDEDX_h = new double[numELossData*numHepEmMatCuts]; double* elDataDEDXSD_h = new double[numELossData*numHepEmMatCuts]; double* elDataInvRangeSD_h = new double[numELossData*numHepEmMatCuts]; int indxCont = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int iRangeStart = imc*5*numELossData; int iDEDXStarts = iRangeStart+2*numELossData; int iIRanSDStarts = iRangeStart+4*numELossData; for (int i=0; i<numELossData; ++i) { elDataRange_h[indxCont] = onHOST->fELossData[iRangeStart +2*i ]; // Range elDataRangeSD_h[indxCont] = onHOST->fELossData[iRangeStart +2*i+1]; // its SD elDataDEDX_h[indxCont] = onHOST->fELossData[iDEDXStarts +2*i ]; // DEDX elDataDEDXSD_h[indxCont] = onHOST->fELossData[iDEDXStarts +2*i+1]; // its SD elDataInvRangeSD_h[indxCont++] = onHOST->fELossData[iIRanSDStarts+ i ]; // inv. 
range SD } } // allocate memory on the device and copy the loss data arrays to _d std::size_t theELossDataSize = sizeof( double )*numELossData*numHepEmMatCuts; gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossDataRange), theELossDataSize ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossDataRangeSD), theELossDataSize ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossDataDEDX), theELossDataSize ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossDataDEDXSD), theELossDataSize ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fELossDataInvRangeSD), theELossDataSize ) ); // gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossDataRange, elDataRange_h, theELossDataSize, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossDataRangeSD, elDataRangeSD_h, theELossDataSize, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossDataDEDX, elDataDEDX_h, theELossDataSize, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossDataDEDXSD, elDataDEDXSD_h, theELossDataSize, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fELossDataInvRangeSD, elDataInvRangeSD_h, theELossDataSize, hipMemcpyHostToDevice ) ); // free auxilary memorys allocated on host delete[] elDataRange_h; delete[] elDataRangeSD_h; delete[] elDataDEDX_h; delete[] elDataDEDXSD_h; delete[] elDataInvRangeSD_h; // // === Restricted macroscopic scross section data: // // allocate memory for all arrays on _d int* ioniDataStart_h = new int[numHepEmMatCuts]; int* ioniNumData_h = new int[numHepEmMatCuts]; int* bremDataStart_h = new int[numHepEmMatCuts]; int* bremNumData_h = new int[numHepEmMatCuts]; double* ioniAuxData_h = new double[4*numHepEmMatCuts]; double* bremAuxData_h = new double[4*numHepEmMatCuts]; gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecIoniDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecNumIoniData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecBremDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecNumBremData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecIoniAuxData), sizeof( double ) *4*numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecBremAuxData), sizeof( double ) *4*numHepEmMatCuts ) ); // run through the fResMacXSecData and count the size of the sum ioni/brem data int sumIoniData = 0; int sumBremData = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int is = onHOST->fResMacXSecStartIndexPerMatCut[imc]; int numIoni = (int)onHOST->fResMacXSecData[is]; int numBrem = (int)onHOST->fResMacXSecData[is+5+3*numIoni]; sumIoniData += numIoni; sumBremData += numBrem; } double* ioniEData_h = new double[sumIoniData]; double* ioniData_h = new double[sumIoniData]; double* ioniSDData_h = new double[sumIoniData]; double* bremEData_h = new double[sumBremData]; double* bremData_h = new double[sumBremData]; double* bremSDData_h = new double[sumBremData]; // gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecIoniEData), sizeof( double ) *sumIoniData ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecIoniData), sizeof( double ) *sumIoniData ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecIoniSDData), sizeof( double ) *sumIoniData ) ); // gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecBremEData), sizeof( double ) *sumBremData ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecBremData), sizeof( double ) *sumBremData ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fResMacXSecBremSDData), sizeof( 
double ) *sumBremData ) ); // // populate all host side arrays int indxContIoni = 0; int indxContBrem = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int is = onHOST->fResMacXSecStartIndexPerMatCut[imc]; int ioniStrt = is; int numIoni = (int)onHOST->fResMacXSecData[ioniStrt]; int bremStrt = ioniStrt+5+3*numIoni; int numBrem = (int)onHOST->fResMacXSecData[bremStrt]; // fill in where the ioni/brem data starts for this (index = imc) material // cuts couple in the continuous data arrays ioniDataStart_h[imc] = indxContIoni; bremDataStart_h[imc] = indxContBrem; // the number of ioni/brem data for this mat-cuts couple (auxilary data stored // separately on the device) ioniNumData_h[imc] = numIoni; bremNumData_h[imc] = numBrem; // fill in the 4 remaining (the first was the #data) auxilary data ioniAuxData_h[4*imc+0] = onHOST->fResMacXSecData[ioniStrt+1]; // max-E ioniAuxData_h[4*imc+1] = onHOST->fResMacXSecData[ioniStrt+2]; // max-Val ioniAuxData_h[4*imc+2] = onHOST->fResMacXSecData[ioniStrt+3]; // log(E_0) ioniAuxData_h[4*imc+3] = onHOST->fResMacXSecData[ioniStrt+4]; // 1/log-delta // bremAuxData_h[4*imc+0] = onHOST->fResMacXSecData[bremStrt+1]; // max-E bremAuxData_h[4*imc+1] = onHOST->fResMacXSecData[bremStrt+2]; // max-Val bremAuxData_h[4*imc+2] = onHOST->fResMacXSecData[bremStrt+3]; // log(E_0) bremAuxData_h[4*imc+3] = onHOST->fResMacXSecData[bremStrt+4]; // 1/log-delta // fill in the ioni and brem data for (int i=0; i<numIoni; ++i) { ioniEData_h[indxContIoni] = onHOST->fResMacXSecData[ioniStrt+5+3*i]; ioniData_h[indxContIoni] = onHOST->fResMacXSecData[ioniStrt+5+3*i+1]; ioniSDData_h[indxContIoni++] = onHOST->fResMacXSecData[ioniStrt+5+3*i+2]; } for (int i=0; i<numBrem; ++i) { bremEData_h[indxContBrem] = onHOST->fResMacXSecData[bremStrt+5+3*i]; bremData_h[indxContBrem] = onHOST->fResMacXSecData[bremStrt+5+3*i+1]; bremSDData_h[indxContBrem++] = onHOST->fResMacXSecData[bremStrt+5+3*i+2]; } } // // copy all array data from _h to _d // // Ioni: gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecIoniDataStart, ioniDataStart_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecNumIoniData, ioniNumData_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecIoniAuxData, ioniAuxData_h, sizeof( double ) *4*numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecIoniEData, ioniEData_h, sizeof( double ) *sumIoniData, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecIoniData, ioniData_h, sizeof( double ) *sumIoniData, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecIoniSDData, ioniSDData_h, sizeof( double ) *sumIoniData, hipMemcpyHostToDevice ) ); // // brem: gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecBremDataStart, bremDataStart_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecNumBremData, bremNumData_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecBremAuxData, bremAuxData_h, sizeof( double ) *4*numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecBremEData, bremEData_h, sizeof( double ) *sumBremData, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecBremData, bremData_h, sizeof( double ) *sumBremData, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fResMacXSecBremSDData, bremSDData_h, sizeof( double ) 
*sumBremData, hipMemcpyHostToDevice ) ); // // free all auxilary memory allocated on the host side delete[] ioniDataStart_h; delete[] ioniNumData_h; delete[] ioniAuxData_h; delete[] ioniEData_h; delete[] ioniData_h; delete[] ioniSDData_h; // delete[] bremDataStart_h; delete[] bremNumData_h; delete[] bremAuxData_h; delete[] bremEData_h; delete[] bremData_h; delete[] bremSDData_h; // // === Target element selector data (for ioni and brem EM models) // // allocate data for all arrays on _h and _d int* numElements_h = new int[numHepEmMatCuts]; int* ioniStart_h = new int[numHepEmMatCuts]; int* numIoni_h = new int[numHepEmMatCuts]; ioniAuxData_h = new double[2*numHepEmMatCuts]; int numIoniData = onHOST->fElemSelectorIoniNumData; ioniData_h = new double[numIoniData]; // gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorNumElements), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorIoniDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorNumIoniData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorIoniAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorIoniData), sizeof( double ) *numIoniData ) ); // int* bremSBStart_h = new int[numHepEmMatCuts]; int* numBremSB_h = new int[numHepEmMatCuts]; double* bremSBAuxData_h = new double[2*numHepEmMatCuts]; int numBremSBData = onHOST->fElemSelectorBremSBNumData; double* bremSBData_h = new double[numBremSBData]; // gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremSBDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorNumBremSBData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremSBAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremSBData), sizeof( double ) *numBremSBData ) ); // int* bremRBStart_h = new int[numHepEmMatCuts]; int* numBremRB_h = new int[numHepEmMatCuts]; double* bremRBAuxData_h = new double[2*numHepEmMatCuts]; int numBremRBData = onHOST->fElemSelectorBremRBNumData; double* bremRBData_h = new double[numBremRBData]; // gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremRBDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorNumBremRBData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremRBAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( hipMalloc ( &(elDataHTo_d->fElemSelectorBremRBData), sizeof( double ) *numBremRBData ) ); // // populate the host side arrays with data indxContIoni = 0; int indxContBremSB = 0; int indxContBremRB = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { // ioni: Moller-Bhabha int iStart = onHOST->fElemSelectorIoniStartIndexPerMatCut[imc]; ioniStart_h[imc] = iStart; // might be -1 i.e. in case of single elemnt materials of E_min>=E_max i.e. 
no selector data if (iStart > -1) { ioniStart_h[imc] = indxContIoni; numIoni_h[imc] = onHOST->fElemSelectorIoniData[iStart]; numElements_h[imc] = onHOST->fElemSelectorIoniData[iStart+1]; ioniAuxData_h[2*imc] = onHOST->fElemSelectorIoniData[iStart+2]; ioniAuxData_h[2*imc+1] = onHOST->fElemSelectorIoniData[iStart+3]; int allData = numIoni_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { ioniData_h[indxContIoni++] = onHOST->fElemSelectorIoniData[iStart+4+i]; } } // brem: Seltzer-Berger iStart = onHOST->fElemSelectorBremSBStartIndexPerMatCut[imc]; bremSBStart_h[imc] = iStart; // might be -1 i.e. in case of single elemnt materials of E_min>=E_max i.e. no selector data if (iStart > -1) { bremSBStart_h[imc] = indxContBremSB; numBremSB_h[imc] = onHOST->fElemSelectorBremSBData[iStart]; numElements_h[imc] = onHOST->fElemSelectorBremSBData[iStart+1]; bremSBAuxData_h[2*imc] = onHOST->fElemSelectorBremSBData[iStart+2]; bremSBAuxData_h[2*imc+1] = onHOST->fElemSelectorBremSBData[iStart+3]; int allData = numBremSB_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { bremSBData_h[indxContBremSB++] = onHOST->fElemSelectorBremSBData[iStart+4+i]; } } // brem: relativistic iStart = onHOST->fElemSelectorBremRBStartIndexPerMatCut[imc]; bremRBStart_h[imc] = iStart; // might be -1 i.e. in case of single elemnt materials of E_min>=E_max i.e. no selector data if (iStart > -1) { bremRBStart_h[imc] = indxContBremRB; numBremRB_h[imc] = onHOST->fElemSelectorBremRBData[iStart]; numElements_h[imc] = onHOST->fElemSelectorBremRBData[iStart+1]; bremRBAuxData_h[2*imc] = onHOST->fElemSelectorBremRBData[iStart+2]; bremRBAuxData_h[2*imc+1] = onHOST->fElemSelectorBremRBData[iStart+3]; int allData = numBremRB_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { bremRBData_h[indxContBremRB++] = onHOST->fElemSelectorBremRBData[iStart+4+i]; } } } // copy from _h to _d all arrays gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorNumElements, numElements_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); // ioni gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorIoniDataStart, ioniStart_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorNumIoniData, numIoni_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorIoniAuxData, ioniAuxData_h, sizeof( double ) *2*numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorIoniData, ioniData_h, sizeof( double ) *indxContIoni, hipMemcpyHostToDevice ) ); // brem: Seltzer-Berger gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremSBDataStart, bremSBStart_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorNumBremSBData, numBremSB_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremSBAuxData, bremSBAuxData_h, sizeof( double ) *2*numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremSBData, bremSBData_h, sizeof( double ) *indxContBremSB, hipMemcpyHostToDevice ) ); // brem: rel. 
brem gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremRBDataStart, bremRBStart_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorNumBremRBData, numBremRB_h, sizeof( int ) *numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremRBAuxData, bremRBAuxData_h, sizeof( double ) *2*numHepEmMatCuts, hipMemcpyHostToDevice ) ); gpuErrchk ( hipMemcpy ( elDataHTo_d->fElemSelectorBremRBData, bremRBData_h, sizeof( double ) *indxContBremRB, hipMemcpyHostToDevice ) ); // // clean all dynamically allocated auxilary host memory delete[] numElements_h; // delete[] ioniStart_h; delete[] numIoni_h; delete[] ioniAuxData_h; delete[] ioniData_h; // delete[] bremSBStart_h; delete[] numBremSB_h; delete[] bremSBAuxData_h; delete[] bremSBData_h; // delete[] bremRBStart_h; delete[] numBremRB_h; delete[] bremRBAuxData_h; delete[] bremRBData_h; // // then finaly copy the top level, i.e. the main struct with the already // appropriate pointers to device side memory locations but stored on the host gpuErrchk ( hipMalloc ( onDEVICE, sizeof( struct G4HepEmElectronDataOnDevice ) ) ); gpuErrchk ( hipMemcpy ( *onDEVICE, elDataHTo_d, sizeof( struct G4HepEmElectronDataOnDevice ), hipMemcpyHostToDevice ) ); // and clean delete elDataHTo_d; } void FreeElectronDataOnDevice(struct G4HepEmElectronDataOnDevice** onDEVICE) { if (*onDEVICE) { // copy the on-device data bakc to host in order to be able to free the device // side dynamically allocated memories struct G4HepEmElectronDataOnDevice* onHostTo_d = new G4HepEmElectronDataOnDevice; gpuErrchk ( hipMemcpy( onHostTo_d, onDEVICE, sizeof( struct G4HepEmElectronDataOnDevice ), hipMemcpyDeviceToHost ) ); // ELoss data hipFree( onHostTo_d->fELossEnergyGrid ); hipFree( onHostTo_d->fELossDataRange ); hipFree( onHostTo_d->fELossDataRangeSD ); hipFree( onHostTo_d->fELossDataDEDX ); hipFree( onHostTo_d->fELossDataDEDXSD ); hipFree( onHostTo_d->fELossDataInvRangeSD ); // Macr. cross sections for ioni/brem hipFree( onHostTo_d->fResMacXSecIoniDataStart ); hipFree( onHostTo_d->fResMacXSecNumIoniData ); hipFree( onHostTo_d->fResMacXSecBremDataStart ); hipFree( onHostTo_d->fResMacXSecNumBremData ); hipFree( onHostTo_d->fResMacXSecIoniAuxData ); hipFree( onHostTo_d->fResMacXSecIoniEData ); hipFree( onHostTo_d->fResMacXSecIoniData ); hipFree( onHostTo_d->fResMacXSecIoniSDData ); hipFree( onHostTo_d->fResMacXSecBremAuxData ); hipFree( onHostTo_d->fResMacXSecBremEData ); hipFree( onHostTo_d->fResMacXSecBremData ); hipFree( onHostTo_d->fResMacXSecBremSDData ); // Target element selectors for ioni and brem models hipFree( onHostTo_d->fElemSelectorNumElements ); hipFree( onHostTo_d->fElemSelectorIoniDataStart ); hipFree( onHostTo_d->fElemSelectorNumIoniData ); hipFree( onHostTo_d->fElemSelectorBremSBDataStart ); hipFree( onHostTo_d->fElemSelectorNumBremSBData ); hipFree( onHostTo_d->fElemSelectorBremRBDataStart ); hipFree( onHostTo_d->fElemSelectorNumBremRBData ); hipFree( onHostTo_d->fElemSelectorIoniAuxData ); hipFree( onHostTo_d->fElemSelectorBremSBAuxData ); hipFree( onHostTo_d->fElemSelectorBremRBAuxData ); hipFree( onHostTo_d->fElemSelectorIoniData ); hipFree( onHostTo_d->fElemSelectorBremSBData ); hipFree( onHostTo_d->fElemSelectorBremRBData ); // free the remaining device side electron data and set the host side ptr to null hipFree( *onDEVICE ); *onDEVICE = nullptr; } }
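// ---------------------------------------------------------------------------
// [Editor's sketch -- not part of the file above.] CopyElectronDataToDevice
// uses a two-level pattern worth isolating: a host-resident struct
// (elDataHTo_d) is filled with *device* pointers, and the struct itself is
// then copied to the device so kernels can reach every array through one
// handle; FreeElectronDataOnDevice reverses this by copying the struct back
// to read the pointers it must free. A minimal self-contained version of
// that idea (CUDA-flavored to match the .cu twin below; all names here are
// hypothetical):
#include <cstdio>

struct DeviceTable {
    int     n;
    double* values;   // device pointer, even while the struct lives on host
};

__global__ void SumKernel(const DeviceTable* table, double* out) {
    double sum = 0.0;
    for (int i = 0; i < table->n; ++i) {
        sum += table->values[i];
    }
    *out = sum;
}

int main() {
    const int n = 4;
    double h[n] = {1.0, 2.0, 3.0, 4.0};

    DeviceTable tableHTo_d;   // host copy that will hold device-side pointers
    tableHTo_d.n = n;
    cudaMalloc(&tableHTo_d.values, n * sizeof(double));
    cudaMemcpy(tableHTo_d.values, h, n * sizeof(double), cudaMemcpyHostToDevice);

    DeviceTable* table_d = nullptr;   // the struct itself, on the device
    cudaMalloc(&table_d, sizeof(DeviceTable));
    cudaMemcpy(table_d, &tableHTo_d, sizeof(DeviceTable), cudaMemcpyHostToDevice);

    double* sum_d = nullptr;
    cudaMalloc(&sum_d, sizeof(double));
    SumKernel<<<1, 1>>>(table_d, sum_d);

    double sum = 0.0;
    cudaMemcpy(&sum, sum_d, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %g\n", sum);   // expected: 10

    // freeing mirrors FreeElectronDataOnDevice: inner arrays, then the struct
    cudaFree(tableHTo_d.values);
    cudaFree(table_d);
    cudaFree(sum_d);
    return 0;
}
// ---------------------------------------------------------------------------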
ed3bc4e9440bf081df7eae804c18fd5313273e7f.cu
#include "G4HepEmElectronData.hh" #include <iostream> #include <cuda_runtime.h> #include "G4HepEmCuUtils.hh" void CopyElectronDataToDevice(struct G4HepEmElectronData* onHOST, struct G4HepEmElectronDataOnDevice** onDEVICE) { if ( !onHOST ) return; // clean away previous (if any) if ( *onDEVICE ) { FreeElectronDataOnDevice ( onDEVICE ); } // Create a G4HepEmElectronDataOnDevice structure to store pointers to _d // side arrays on the _h side. struct G4HepEmElectronDataOnDevice* elDataHTo_d = new G4HepEmElectronDataOnDevice; elDataHTo_d->fNumMatCuts = onHOST->fNumMatCuts; int numHepEmMatCuts = elDataHTo_d->fNumMatCuts; // // === ELoss data: // // set non-pointer members of the host side strcuture elDataHTo_d->fELossEnergyGridSize = onHOST->fELossEnergyGridSize; elDataHTo_d->fELossLogMinEkin = onHOST->fELossLogMinEkin; elDataHTo_d->fELossEILDelta = onHOST->fELossEILDelta; // allocate memory on _d for the ELoss energy grid and copy form _h int numELossData = onHOST->fELossEnergyGridSize; gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossEnergyGrid), sizeof( double ) *numELossData ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossEnergyGrid, onHOST->fELossEnergyGrid, sizeof( double ) *numELossData, cudaMemcpyHostToDevice ) ); // // allocate data on the host for the re-aranged ELossData arrays double* elDataRange_h = new double[numELossData*numHepEmMatCuts]; double* elDataRangeSD_h = new double[numELossData*numHepEmMatCuts]; double* elDataDEDX_h = new double[numELossData*numHepEmMatCuts]; double* elDataDEDXSD_h = new double[numELossData*numHepEmMatCuts]; double* elDataInvRangeSD_h = new double[numELossData*numHepEmMatCuts]; int indxCont = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int iRangeStart = imc*5*numELossData; int iDEDXStarts = iRangeStart+2*numELossData; int iIRanSDStarts = iRangeStart+4*numELossData; for (int i=0; i<numELossData; ++i) { elDataRange_h[indxCont] = onHOST->fELossData[iRangeStart +2*i ]; // Range elDataRangeSD_h[indxCont] = onHOST->fELossData[iRangeStart +2*i+1]; // its SD elDataDEDX_h[indxCont] = onHOST->fELossData[iDEDXStarts +2*i ]; // DEDX elDataDEDXSD_h[indxCont] = onHOST->fELossData[iDEDXStarts +2*i+1]; // its SD elDataInvRangeSD_h[indxCont++] = onHOST->fELossData[iIRanSDStarts+ i ]; // inv. 
range SD } } // allocate memory on the device and copy the loss data arrays to _d std::size_t theELossDataSize = sizeof( double )*numELossData*numHepEmMatCuts; gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossDataRange), theELossDataSize ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossDataRangeSD), theELossDataSize ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossDataDEDX), theELossDataSize ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossDataDEDXSD), theELossDataSize ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fELossDataInvRangeSD), theELossDataSize ) ); // gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossDataRange, elDataRange_h, theELossDataSize, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossDataRangeSD, elDataRangeSD_h, theELossDataSize, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossDataDEDX, elDataDEDX_h, theELossDataSize, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossDataDEDXSD, elDataDEDXSD_h, theELossDataSize, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fELossDataInvRangeSD, elDataInvRangeSD_h, theELossDataSize, cudaMemcpyHostToDevice ) ); // free auxilary memorys allocated on host delete[] elDataRange_h; delete[] elDataRangeSD_h; delete[] elDataDEDX_h; delete[] elDataDEDXSD_h; delete[] elDataInvRangeSD_h; // // === Restricted macroscopic scross section data: // // allocate memory for all arrays on _d int* ioniDataStart_h = new int[numHepEmMatCuts]; int* ioniNumData_h = new int[numHepEmMatCuts]; int* bremDataStart_h = new int[numHepEmMatCuts]; int* bremNumData_h = new int[numHepEmMatCuts]; double* ioniAuxData_h = new double[4*numHepEmMatCuts]; double* bremAuxData_h = new double[4*numHepEmMatCuts]; gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecIoniDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecNumIoniData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecBremDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecNumBremData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecIoniAuxData), sizeof( double ) *4*numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecBremAuxData), sizeof( double ) *4*numHepEmMatCuts ) ); // run through the fResMacXSecData and count the size of the sum ioni/brem data int sumIoniData = 0; int sumBremData = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int is = onHOST->fResMacXSecStartIndexPerMatCut[imc]; int numIoni = (int)onHOST->fResMacXSecData[is]; int numBrem = (int)onHOST->fResMacXSecData[is+5+3*numIoni]; sumIoniData += numIoni; sumBremData += numBrem; } double* ioniEData_h = new double[sumIoniData]; double* ioniData_h = new double[sumIoniData]; double* ioniSDData_h = new double[sumIoniData]; double* bremEData_h = new double[sumBremData]; double* bremData_h = new double[sumBremData]; double* bremSDData_h = new double[sumBremData]; // gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecIoniEData), sizeof( double ) *sumIoniData ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecIoniData), sizeof( double ) *sumIoniData ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecIoniSDData), sizeof( double ) *sumIoniData ) ); // gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecBremEData), sizeof( double ) *sumBremData ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fResMacXSecBremData), sizeof( double ) *sumBremData ) ); gpuErrchk ( cudaMalloc ( 
&(elDataHTo_d->fResMacXSecBremSDData), sizeof( double ) *sumBremData ) ); // // populate all host side arrays int indxContIoni = 0; int indxContBrem = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { int is = onHOST->fResMacXSecStartIndexPerMatCut[imc]; int ioniStrt = is; int numIoni = (int)onHOST->fResMacXSecData[ioniStrt]; int bremStrt = ioniStrt+5+3*numIoni; int numBrem = (int)onHOST->fResMacXSecData[bremStrt]; // fill in where the ioni/brem data starts for this (index = imc) material // cuts couple in the continuous data arrays ioniDataStart_h[imc] = indxContIoni; bremDataStart_h[imc] = indxContBrem; // the number of ioni/brem data for this mat-cuts couple (auxilary data stored // separately on the device) ioniNumData_h[imc] = numIoni; bremNumData_h[imc] = numBrem; // fill in the 4 remaining (the first was the #data) auxilary data ioniAuxData_h[4*imc+0] = onHOST->fResMacXSecData[ioniStrt+1]; // max-E ioniAuxData_h[4*imc+1] = onHOST->fResMacXSecData[ioniStrt+2]; // max-Val ioniAuxData_h[4*imc+2] = onHOST->fResMacXSecData[ioniStrt+3]; // log(E_0) ioniAuxData_h[4*imc+3] = onHOST->fResMacXSecData[ioniStrt+4]; // 1/log-delta // bremAuxData_h[4*imc+0] = onHOST->fResMacXSecData[bremStrt+1]; // max-E bremAuxData_h[4*imc+1] = onHOST->fResMacXSecData[bremStrt+2]; // max-Val bremAuxData_h[4*imc+2] = onHOST->fResMacXSecData[bremStrt+3]; // log(E_0) bremAuxData_h[4*imc+3] = onHOST->fResMacXSecData[bremStrt+4]; // 1/log-delta // fill in the ioni and brem data for (int i=0; i<numIoni; ++i) { ioniEData_h[indxContIoni] = onHOST->fResMacXSecData[ioniStrt+5+3*i]; ioniData_h[indxContIoni] = onHOST->fResMacXSecData[ioniStrt+5+3*i+1]; ioniSDData_h[indxContIoni++] = onHOST->fResMacXSecData[ioniStrt+5+3*i+2]; } for (int i=0; i<numBrem; ++i) { bremEData_h[indxContBrem] = onHOST->fResMacXSecData[bremStrt+5+3*i]; bremData_h[indxContBrem] = onHOST->fResMacXSecData[bremStrt+5+3*i+1]; bremSDData_h[indxContBrem++] = onHOST->fResMacXSecData[bremStrt+5+3*i+2]; } } // // copy all array data from _h to _d // // Ioni: gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecIoniDataStart, ioniDataStart_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecNumIoniData, ioniNumData_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecIoniAuxData, ioniAuxData_h, sizeof( double ) *4*numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecIoniEData, ioniEData_h, sizeof( double ) *sumIoniData, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecIoniData, ioniData_h, sizeof( double ) *sumIoniData, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecIoniSDData, ioniSDData_h, sizeof( double ) *sumIoniData, cudaMemcpyHostToDevice ) ); // // brem: gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecBremDataStart, bremDataStart_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecNumBremData, bremNumData_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecBremAuxData, bremAuxData_h, sizeof( double ) *4*numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecBremEData, bremEData_h, sizeof( double ) *sumBremData, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fResMacXSecBremData, bremData_h, sizeof( double ) *sumBremData, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy 
( elDataHTo_d->fResMacXSecBremSDData, bremSDData_h, sizeof( double ) *sumBremData, cudaMemcpyHostToDevice ) ); // // free all auxiliary memory allocated on the host side delete[] ioniDataStart_h; delete[] ioniNumData_h; delete[] ioniAuxData_h; delete[] ioniEData_h; delete[] ioniData_h; delete[] ioniSDData_h; // delete[] bremDataStart_h; delete[] bremNumData_h; delete[] bremAuxData_h; delete[] bremEData_h; delete[] bremData_h; delete[] bremSDData_h; // // === Target element selector data (for ioni and brem EM models) // // allocate data for all arrays on _h and _d int* numElements_h = new int[numHepEmMatCuts]; int* ioniStart_h = new int[numHepEmMatCuts]; int* numIoni_h = new int[numHepEmMatCuts]; ioniAuxData_h = new double[2*numHepEmMatCuts]; int numIoniData = onHOST->fElemSelectorIoniNumData; ioniData_h = new double[numIoniData]; // gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorNumElements), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorIoniDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorNumIoniData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorIoniAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorIoniData), sizeof( double ) *numIoniData ) ); // int* bremSBStart_h = new int[numHepEmMatCuts]; int* numBremSB_h = new int[numHepEmMatCuts]; double* bremSBAuxData_h = new double[2*numHepEmMatCuts]; int numBremSBData = onHOST->fElemSelectorBremSBNumData; double* bremSBData_h = new double[numBremSBData]; // gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremSBDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorNumBremSBData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremSBAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremSBData), sizeof( double ) *numBremSBData ) ); // int* bremRBStart_h = new int[numHepEmMatCuts]; int* numBremRB_h = new int[numHepEmMatCuts]; double* bremRBAuxData_h = new double[2*numHepEmMatCuts]; int numBremRBData = onHOST->fElemSelectorBremRBNumData; double* bremRBData_h = new double[numBremRBData]; // gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremRBDataStart), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorNumBremRBData), sizeof( int ) *numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremRBAuxData), sizeof( double ) *2*numHepEmMatCuts ) ); gpuErrchk ( cudaMalloc ( &(elDataHTo_d->fElemSelectorBremRBData), sizeof( double ) *numBremRBData ) ); // // populate the host side arrays with data indxContIoni = 0; int indxContBremSB = 0; int indxContBremRB = 0; for (int imc=0; imc<numHepEmMatCuts; ++imc) { // ioni: Moller-Bhabha int iStart = onHOST->fElemSelectorIoniStartIndexPerMatCut[imc]; ioniStart_h[imc] = iStart; // might be -1 i.e. in case of single element materials or E_min>=E_max i.e.
no selector data if (iStart > -1) { ioniStart_h[imc] = indxContIoni; numIoni_h[imc] = onHOST->fElemSelectorIoniData[iStart]; numElements_h[imc] = onHOST->fElemSelectorIoniData[iStart+1]; ioniAuxData_h[2*imc] = onHOST->fElemSelectorIoniData[iStart+2]; ioniAuxData_h[2*imc+1] = onHOST->fElemSelectorIoniData[iStart+3]; int allData = numIoni_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { ioniData_h[indxContIoni++] = onHOST->fElemSelectorIoniData[iStart+4+i]; } } // brem: Seltzer-Berger iStart = onHOST->fElemSelectorBremSBStartIndexPerMatCut[imc]; bremSBStart_h[imc] = iStart; // might be -1 i.e. in case of single element materials or E_min>=E_max i.e. no selector data if (iStart > -1) { bremSBStart_h[imc] = indxContBremSB; numBremSB_h[imc] = onHOST->fElemSelectorBremSBData[iStart]; numElements_h[imc] = onHOST->fElemSelectorBremSBData[iStart+1]; bremSBAuxData_h[2*imc] = onHOST->fElemSelectorBremSBData[iStart+2]; bremSBAuxData_h[2*imc+1] = onHOST->fElemSelectorBremSBData[iStart+3]; int allData = numBremSB_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { bremSBData_h[indxContBremSB++] = onHOST->fElemSelectorBremSBData[iStart+4+i]; } } // brem: relativistic iStart = onHOST->fElemSelectorBremRBStartIndexPerMatCut[imc]; bremRBStart_h[imc] = iStart; // might be -1 i.e. in case of single element materials or E_min>=E_max i.e. no selector data if (iStart > -1) { bremRBStart_h[imc] = indxContBremRB; numBremRB_h[imc] = onHOST->fElemSelectorBremRBData[iStart]; numElements_h[imc] = onHOST->fElemSelectorBremRBData[iStart+1]; bremRBAuxData_h[2*imc] = onHOST->fElemSelectorBremRBData[iStart+2]; bremRBAuxData_h[2*imc+1] = onHOST->fElemSelectorBremRBData[iStart+3]; int allData = numBremRB_h[imc]*numElements_h[imc]; for (int i=0; i<allData; ++i) { bremRBData_h[indxContBremRB++] = onHOST->fElemSelectorBremRBData[iStart+4+i]; } } } // copy from _h to _d all arrays gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorNumElements, numElements_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); // ioni gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorIoniDataStart, ioniStart_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorNumIoniData, numIoni_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorIoniAuxData, ioniAuxData_h, sizeof( double ) *2*numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorIoniData, ioniData_h, sizeof( double ) *indxContIoni, cudaMemcpyHostToDevice ) ); // brem: Seltzer-Berger gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremSBDataStart, bremSBStart_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorNumBremSBData, numBremSB_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremSBAuxData, bremSBAuxData_h, sizeof( double ) *2*numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremSBData, bremSBData_h, sizeof( double ) *indxContBremSB, cudaMemcpyHostToDevice ) ); // brem: rel.
brem gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremRBDataStart, bremRBStart_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorNumBremRBData, numBremRB_h, sizeof( int ) *numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremRBAuxData, bremRBAuxData_h, sizeof( double ) *2*numHepEmMatCuts, cudaMemcpyHostToDevice ) ); gpuErrchk ( cudaMemcpy ( elDataHTo_d->fElemSelectorBremRBData, bremRBData_h, sizeof( double ) *indxContBremRB, cudaMemcpyHostToDevice ) ); // // clean all dynamically allocated auxiliary host memory delete[] numElements_h; // delete[] ioniStart_h; delete[] numIoni_h; delete[] ioniAuxData_h; delete[] ioniData_h; // delete[] bremSBStart_h; delete[] numBremSB_h; delete[] bremSBAuxData_h; delete[] bremSBData_h; // delete[] bremRBStart_h; delete[] numBremRB_h; delete[] bremRBAuxData_h; delete[] bremRBData_h; // // then finally copy the top level, i.e. the main struct with the already // appropriate pointers to device side memory locations but stored on the host gpuErrchk ( cudaMalloc ( onDEVICE, sizeof( struct G4HepEmElectronDataOnDevice ) ) ); gpuErrchk ( cudaMemcpy ( *onDEVICE, elDataHTo_d, sizeof( struct G4HepEmElectronDataOnDevice ), cudaMemcpyHostToDevice ) ); // and clean delete elDataHTo_d; } void FreeElectronDataOnDevice(struct G4HepEmElectronDataOnDevice** onDEVICE) { if (*onDEVICE) { // copy the on-device data back to host in order to be able to free the device // side dynamically allocated memories struct G4HepEmElectronDataOnDevice* onHostTo_d = new G4HepEmElectronDataOnDevice; gpuErrchk ( cudaMemcpy( onHostTo_d, *onDEVICE, sizeof( struct G4HepEmElectronDataOnDevice ), cudaMemcpyDeviceToHost ) ); // ELoss data cudaFree( onHostTo_d->fELossEnergyGrid ); cudaFree( onHostTo_d->fELossDataRange ); cudaFree( onHostTo_d->fELossDataRangeSD ); cudaFree( onHostTo_d->fELossDataDEDX ); cudaFree( onHostTo_d->fELossDataDEDXSD ); cudaFree( onHostTo_d->fELossDataInvRangeSD ); // Macr. cross sections for ioni/brem cudaFree( onHostTo_d->fResMacXSecIoniDataStart ); cudaFree( onHostTo_d->fResMacXSecNumIoniData ); cudaFree( onHostTo_d->fResMacXSecBremDataStart ); cudaFree( onHostTo_d->fResMacXSecNumBremData ); cudaFree( onHostTo_d->fResMacXSecIoniAuxData ); cudaFree( onHostTo_d->fResMacXSecIoniEData ); cudaFree( onHostTo_d->fResMacXSecIoniData ); cudaFree( onHostTo_d->fResMacXSecIoniSDData ); cudaFree( onHostTo_d->fResMacXSecBremAuxData ); cudaFree( onHostTo_d->fResMacXSecBremEData ); cudaFree( onHostTo_d->fResMacXSecBremData ); cudaFree( onHostTo_d->fResMacXSecBremSDData ); // Target element selectors for ioni and brem models cudaFree( onHostTo_d->fElemSelectorNumElements ); cudaFree( onHostTo_d->fElemSelectorIoniDataStart ); cudaFree( onHostTo_d->fElemSelectorNumIoniData ); cudaFree( onHostTo_d->fElemSelectorBremSBDataStart ); cudaFree( onHostTo_d->fElemSelectorNumBremSBData ); cudaFree( onHostTo_d->fElemSelectorBremRBDataStart ); cudaFree( onHostTo_d->fElemSelectorNumBremRBData ); cudaFree( onHostTo_d->fElemSelectorIoniAuxData ); cudaFree( onHostTo_d->fElemSelectorBremSBAuxData ); cudaFree( onHostTo_d->fElemSelectorBremRBAuxData ); cudaFree( onHostTo_d->fElemSelectorIoniData ); cudaFree( onHostTo_d->fElemSelectorBremSBData ); cudaFree( onHostTo_d->fElemSelectorBremRBData ); // free the remaining device side electron data and set the host side ptr to null cudaFree( *onDEVICE ); *onDEVICE = nullptr; delete onHostTo_d; // free the host side mirror of the struct as well } }
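Every device call above goes through gpuErrchk, whose definition lives elsewhere in G4HepEm and is not part of this excerpt. A minimal sketch of the conventional wrapper (the project's own helper may differ in name and abort behavior):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Reports a failing CUDA runtime call (cudaMalloc, cudaMemcpy, ...) at its
// call site, instead of letting the error surface later as corrupted data.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char* file, int line, bool abortOnError = true) {
  if (code != cudaSuccess) {
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abortOnError) exit(code);
  }
}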
129656cf45a3d432dd52335c3cde0600e563e229.hip
// !!! This is a file automatically generated by hipify!!! //! Ising model evolution /*! \param G Spins on the square lattice [n-by-n] \param w Weight matrix [5-by-5] \param k Number of iterations [scalar] \param n Number of lattice points per dim [scalar] NOTE: Both matrices G and w are stored in row-major format. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <hip/hip_runtime.h> #include "ising.h" #define BLOCKDIM 16 #define THREADDIM 6 //This is (int)sqrt(MAXMOMENTSPERTHREAD) __global__ void apply_w(int * data,int * result, double * filter, int n){ __shared__ int G_sh[BLOCKDIM * THREADDIM+4][BLOCKDIM * THREADDIM+4]; __shared__ double w_sh[5][5]; if (threadIdx.x < 5 && threadIdx.y < 5) w_sh[threadIdx.x][threadIdx.y] = filter[5*threadIdx.x + threadIdx.y]; for (int x = 0; x <THREADDIM+1; x ++) { for (int y = 0; y <THREADDIM+1; y ++) { if((threadIdx.x*(THREADDIM+1)+x)<(BLOCKDIM * THREADDIM+4)&&(threadIdx.y*(THREADDIM+1)+y)<(BLOCKDIM * THREADDIM+4)) { G_sh[threadIdx.x*(THREADDIM+1)+x][threadIdx.y*(THREADDIM+1)+y] =data[n*((n+BLOCKDIM*THREADDIM*blockIdx.x + threadIdx.x*(THREADDIM+1)+x-2)%n)+(n+BLOCKDIM*THREADDIM*blockIdx.y + threadIdx.y*(THREADDIM+1)+y-2)%n]; } } } __syncthreads(); for(int x=0;x<THREADDIM;x++){ for(int y=0;y<THREADDIM;y++){ int my_x=(blockIdx.x*blockDim.x+threadIdx.x)*THREADDIM+x; int my_y=(blockIdx.y*blockDim.y+threadIdx.y)*THREADDIM+y; int my_id=my_x*n+my_y; //If thread is outside of compute id threshold it doesn't need to do anything if(my_x>=n||my_y>=n){ break; } double sum=0; for(int i=0;i<5;i++){ for(int j=0;j<5;j++){ sum+=w_sh[i][j]*G_sh[threadIdx.x*THREADDIM+x+i][threadIdx.y*THREADDIM+y+j]; } } if((sum<1e-5)&&(sum>-(1e-5))){ result[my_id]=data[my_id]; } else if(sum<0){ result[my_id]=-1; } else{ result[my_id]=1; } } } } void ising( int *G, double *w, int k, int n){ int * dev_temp; int * dev_G; int * dev_res; double * dev_w; if(hipMalloc(&dev_G,n*n*sizeof(int))!=hipSuccess||hipMalloc(&dev_res,n*n*sizeof(int))!=hipSuccess||hipMalloc(&dev_w,25*sizeof(double))!=hipSuccess){ printf("Error: could not allocate memory on device!"); return; } //copy data to GPU Device hipMemcpy(dev_G,G,n*n*sizeof(int),hipMemcpyDefault); hipMemcpy(dev_w,w,25*sizeof(double),hipMemcpyDefault); //execute kernel for(int rep=0;rep<k;rep++){ dim3 dimBlock(BLOCKDIM,BLOCKDIM); dim3 dimGrid(n/(BLOCKDIM*THREADDIM)+1,n/(BLOCKDIM*THREADDIM)+1); hipLaunchKernelGGL(( apply_w), dim3(dimGrid),dim3(dimBlock), 0, 0, dev_G,dev_res,dev_w,n); dev_temp=dev_res; dev_res=dev_G; dev_G=dev_temp; } //Bring results back to CPU Host hipMemcpy(G,dev_G,n*n*sizeof(int),hipMemcpyDefault); }
129656cf45a3d432dd52335c3cde0600e563e229.cu
//! Ising model evolution /*! \param G Spins on the square lattice [n-by-n] \param w Weight matrix [5-by-5] \param k Number of iterations [scalar] \param n Number of lattice points per dim [scalar] NOTE: Both matrices G and w are stored in row-major format. */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <cuda.h> #include "ising.h" #define BLOCKDIM 16 #define THREADDIM 6 //This is (int)sqrt(MAXMOMENTSPERTHREAD) __global__ void apply_w(int * data,int * result, double * filter, int n){ __shared__ int G_sh[BLOCKDIM * THREADDIM+4][BLOCKDIM * THREADDIM+4]; __shared__ double w_sh[5][5]; if (threadIdx.x < 5 && threadIdx.y < 5) w_sh[threadIdx.x][threadIdx.y] = filter[5*threadIdx.x + threadIdx.y]; for (int x = 0; x <THREADDIM+1; x ++) { for (int y = 0; y <THREADDIM+1; y ++) { if((threadIdx.x*(THREADDIM+1)+x)<(BLOCKDIM * THREADDIM+4)&&(threadIdx.y*(THREADDIM+1)+y)<(BLOCKDIM * THREADDIM+4)) { G_sh[threadIdx.x*(THREADDIM+1)+x][threadIdx.y*(THREADDIM+1)+y] =data[n*((n+BLOCKDIM*THREADDIM*blockIdx.x + threadIdx.x*(THREADDIM+1)+x-2)%n)+(n+BLOCKDIM*THREADDIM*blockIdx.y + threadIdx.y*(THREADDIM+1)+y-2)%n]; } } } __syncthreads(); for(int x=0;x<THREADDIM;x++){ for(int y=0;y<THREADDIM;y++){ int my_x=(blockIdx.x*blockDim.x+threadIdx.x)*THREADDIM+x; int my_y=(blockIdx.y*blockDim.y+threadIdx.y)*THREADDIM+y; int my_id=my_x*n+my_y; //If thread is outside of compute id threshold it doesn't need to do anything if(my_x>=n||my_y>=n){ break; } double sum=0; for(int i=0;i<5;i++){ for(int j=0;j<5;j++){ sum+=w_sh[i][j]*G_sh[threadIdx.x*THREADDIM+x+i][threadIdx.y*THREADDIM+y+j]; } } if((sum<1e-5)&&(sum>-(1e-5))){ result[my_id]=data[my_id]; } else if(sum<0){ result[my_id]=-1; } else{ result[my_id]=1; } } } } void ising( int *G, double *w, int k, int n){ int * dev_temp; int * dev_G; int * dev_res; double * dev_w; if(cudaMalloc(&dev_G,n*n*sizeof(int))!=cudaSuccess||cudaMalloc(&dev_res,n*n*sizeof(int))!=cudaSuccess||cudaMalloc(&dev_w,25*sizeof(double))!=cudaSuccess){ printf("Error: could not allocate memory on device!"); return; } //copy data to GPU Device cudaMemcpy(dev_G,G,n*n*sizeof(int),cudaMemcpyDefault); cudaMemcpy(dev_w,w,25*sizeof(double),cudaMemcpyDefault); //execute kernel for(int rep=0;rep<k;rep++){ dim3 dimBlock(BLOCKDIM,BLOCKDIM); dim3 dimGrid(n/(BLOCKDIM*THREADDIM)+1,n/(BLOCKDIM*THREADDIM)+1); apply_w<<<dimGrid,dimBlock>>>(dev_G,dev_res,dev_w,n); dev_temp=dev_res; dev_res=dev_G; dev_G=dev_temp; } //Bring results back to CPU Host cudaMemcpy(G,dev_G,n*n*sizeof(int),cudaMemcpyDefault); }
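The file only defines the library entry point ising(); a tiny host driver is handy for smoke tests. The following sketch is not part of the original repository; the lattice size, iteration count, and uniform weight matrix are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

void ising(int *G, double *w, int k, int n); // declared in ising.h in the original

int main(void) {
  const int n = 64, k = 10;
  int *G = (int *)malloc(n * n * sizeof(int));
  for (int i = 0; i < n * n; i++) G[i] = (rand() % 2) ? 1 : -1; // random +-1 spins
  double w[25];
  for (int i = 0; i < 25; i++) w[i] = 1.0;
  w[12] = 0.0; // the lattice point itself is typically excluded from the sum
  ising(G, w, k, n);
  printf("G[0] after %d iterations: %d\n", k, G[0]);
  free(G);
  return 0;
}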
efe854e3a0d2d225c885d9c192dc3b77e22a99f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/label_smooth_kernel.h" namespace phi { template <typename T> struct LabelSmoothFunctor { T epsilon; T label_dim; __forceinline__ LabelSmoothFunctor(float epsilon_data, int label_dim_data) { epsilon = static_cast<T>(epsilon_data); label_dim = static_cast<T>(label_dim_data); } __device__ __forceinline__ T operator()(const T x) const { return (static_cast<T>(1 - epsilon) * x + static_cast<T>(epsilon / label_dim)); } }; template <typename T> __global__ void LabelSmoothRunDistKernel(const int N, const float epsilon, const int dist_numel, const T* src, const T* dist_data, T* dst) { CUDA_KERNEL_LOOP(idx, N) { int dist_idx = idx % dist_numel; dst[idx] = static_cast<T>(1 - epsilon) * src[idx] + static_cast<T>(epsilon) * dist_data[dist_idx]; } } template <typename T, typename Context> void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, paddle::optional<const DenseTensor&> prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; auto size_prob = label.numel(); const T* in_data = label.data<T>(); T* out_data = ctx.template Alloc<T>(out); if (prior_dist.get_ptr()) { int threads = 512; int grid = (size_prob + threads - 1) / threads; auto stream = ctx.stream(); const auto* dist_t = prior_dist.get_ptr(); auto dist_numel = dist_t->numel(); const T* dist_data = dist_t->data<T>(); hipLaunchKernelGGL(( LabelSmoothRunDistKernel<T>), dim3(grid), dim3(threads), 0, stream, size_prob, epsilon, dist_numel, in_data, dist_data, out_data); } else { std::vector<const DenseTensor*> ins = {&label}; std::vector<DenseTensor*> outs = {out}; auto functor = LabelSmoothFunctor<T>(epsilon, label_dim); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( ctx, ins, &outs, functor); } } } // namespace phi PD_REGISTER_KERNEL( label_smooth, GPU, ALL_LAYOUT, phi::LabelSmoothKernel, float, double) {}
efe854e3a0d2d225c885d9c192dc3b77e22a99f8.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <vector> #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/label_smooth_kernel.h" namespace phi { template <typename T> struct LabelSmoothFunctor { T epsilon; T label_dim; __forceinline__ LabelSmoothFunctor(float epsilon_data, int label_dim_data) { epsilon = static_cast<T>(epsilon_data); label_dim = static_cast<T>(label_dim_data); } __device__ __forceinline__ T operator()(const T x) const { return (static_cast<T>(1 - epsilon) * x + static_cast<T>(epsilon / label_dim)); } }; template <typename T> __global__ void LabelSmoothRunDistKernel(const int N, const float epsilon, const int dist_numel, const T* src, const T* dist_data, T* dst) { CUDA_KERNEL_LOOP(idx, N) { int dist_idx = idx % dist_numel; dst[idx] = static_cast<T>(1 - epsilon) * src[idx] + static_cast<T>(epsilon) * dist_data[dist_idx]; } } template <typename T, typename Context> void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, paddle::optional<const DenseTensor&> prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; auto size_prob = label.numel(); const T* in_data = label.data<T>(); T* out_data = ctx.template Alloc<T>(out); if (prior_dist.get_ptr()) { int threads = 512; int grid = (size_prob + threads - 1) / threads; auto stream = ctx.stream(); const auto* dist_t = prior_dist.get_ptr(); auto dist_numel = dist_t->numel(); const T* dist_data = dist_t->data<T>(); LabelSmoothRunDistKernel<T><<<grid, threads, 0, stream>>>( size_prob, epsilon, dist_numel, in_data, dist_data, out_data); } else { std::vector<const DenseTensor*> ins = {&label}; std::vector<DenseTensor*> outs = {out}; auto functor = LabelSmoothFunctor<T>(epsilon, label_dim); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( ctx, ins, &outs, functor); } } } // namespace phi PD_REGISTER_KERNEL( label_smooth, GPU, ALL_LAYOUT, phi::LabelSmoothKernel, float, double) {}
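Per element, the no-prior branch computes out = (1 - epsilon) * x + epsilon / label_dim, and the prior branch replaces the uniform term with epsilon * dist[i % dist_numel]. A plain C++ oracle for unit-testing the uniform case (a hypothetical helper, independent of the phi tensor machinery):

#include <vector>
#include <cstddef>

// CPU reference for LabelSmoothKernel without a prior distribution.
std::vector<float> label_smooth_ref(const std::vector<float>& label,
                                    float epsilon, int label_dim) {
  std::vector<float> out(label.size());
  for (std::size_t i = 0; i < label.size(); ++i)
    out[i] = (1.0f - epsilon) * label[i] + epsilon / label_dim; // uniform prior
  return out;
}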
4de7ed7928e778ccd14b7ec19dc9b993f594fb56.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * A very simple cuda implementation of reduce. Uses an array of 1024x1024 * items which are summed into a 1024 array and then summed into a value. */ #include <stdio.h> #include <stdlib.h> /* * this kernel will sum all of the data from in into out - at * least as far as the block will carry you */ __global__ void reduce(float* out, float* in, int size); void startClock(char*); void stopClock(char*); void printClock(char*); int main(int argc, char** argv) { int size = 1024*1024; printf("size = %d\n",size); void *d_in; // device data void *d_mid; // device data - middle results void *d_out; // device data - the answer float *h_in; // host data float h_out; int numBlocks = 1024; hipMalloc(&d_in,size*sizeof(float)); hipMalloc(&d_mid,numBlocks*sizeof(float)); hipMalloc(&d_out,sizeof(float)); h_in = (float*) malloc(size*sizeof(float)); for (int i = 0; i < size; i++) { h_in[i] = 1; } startClock("copy data to device"); hipMemcpy(d_in,h_in,size*sizeof(float),hipMemcpyHostToDevice); stopClock("copy data to device"); startClock("compute"); // use max threads/block and the required # of blocks AND // ask for some shared memory hipLaunchKernelGGL(( reduce), dim3(1024),dim3(1024),1024, 0, (float*) d_mid,(float*) d_in,size); hipLaunchKernelGGL(( reduce), dim3(1),dim3(1024),1024, 0, (float*)d_out,(float*)d_mid,1024); hipDeviceSynchronize(); stopClock("compute"); startClock("copy data to host"); h_out = -17; hipMemcpy(&h_out,d_out,sizeof(float),hipMemcpyDeviceToHost); stopClock("copy data to host"); printf("The total is %f\n",h_out); free(h_in); hipFree(d_in); hipFree(d_mid); hipFree(d_out); printClock("copy data to device"); printClock("compute"); printClock("copy data to host"); }
4de7ed7928e778ccd14b7ec19dc9b993f594fb56.cu
/* * A very simple cuda implementation of reduce. Uses an array of 1024x1024 * items which are summed into a 1024 array and then summed into a value. */ #include <stdio.h> #include <stdlib.h> /* * this kernel will sum all of the data from in into out - at * least as far as the block will carry you */ __global__ void reduce(float* out, float* in, int size); void startClock(char*); void stopClock(char*); void printClock(char*); int main(int argc, char** argv) { int size = 1024*1024; printf("size = %d\n",size); void *d_in; // device data void *d_mid; // device data - middle results void *d_out; // device data - the answer float *h_in; // host data float h_out; int numBlocks = 1024; cudaMalloc(&d_in,size*sizeof(float)); cudaMalloc(&d_mid,numBlocks*sizeof(float)); cudaMalloc(&d_out,sizeof(float)); h_in = (float*) malloc(size*sizeof(float)); for (int i = 0; i < size; i++) { h_in[i] = 1; } startClock("copy data to device"); cudaMemcpy(d_in,h_in,size*sizeof(float),cudaMemcpyHostToDevice); stopClock("copy data to device"); startClock("compute"); // use max threads/block and the required # of blocks AND // ask for some shared memory reduce<<<1024,1024,1024>>>((float*) d_mid,(float*) d_in,size); reduce<<<1,1024,1024>>>((float*)d_out,(float*)d_mid,1024); cudaDeviceSynchronize(); stopClock("compute"); startClock("copy data to host"); h_out = -17; cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost); stopClock("copy data to host"); printf("The total is %f\n",h_out); free(h_in); cudaFree(d_in); cudaFree(d_mid); cudaFree(d_out); printClock("copy data to device"); printClock("compute"); printClock("copy data to host"); }
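Both copies only declare reduce(); its definition is compiled separately and is not shown. One plausible definition consistent with the two-stage launch above is the classic shared-memory tree reduction sketched below. Note that with this definition the dynamic shared-memory argument would have to be blockDim.x * sizeof(float) (4096 bytes for 1024 threads), not the 1024 bytes the launches pass:

// Each block sums a grid-strided slice of `in` and writes one partial sum to
// out[blockIdx.x]; a second one-block launch then folds the partials together.
__global__ void reduce(float* out, float* in, int size) {
  extern __shared__ float sdata[];
  int tid = threadIdx.x;
  float sum = 0.0f;
  for (int i = blockIdx.x * blockDim.x + tid; i < size; i += blockDim.x * gridDim.x)
    sum += in[i];
  sdata[tid] = sum;
  __syncthreads();
  for (int s = blockDim.x / 2; s > 0; s >>= 1) { // tree reduction in shared memory
    if (tid < s) sdata[tid] += sdata[tid + s];
    __syncthreads();
  }
  if (tid == 0) out[blockIdx.x] = sdata[0];
}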
03206872a52b84f0413d1dcb1cf21c2276663b6f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> #include "clusterkernel.h" __device__ inline void atomicAddFloat(float* address, float value){ float old = value; float new_old; do { new_old = atomicExch(address, 0.0f); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0f); }; /** / euclideanDistance() Calculates euclidean distance between two records */ __device__ double euclideanDistance(double *record1, double *record2, int cols){ double dist = 0.0f; int i; for(i = 1; i < cols; i++){ dist += (record1[i]-record2[i]) * (record1[i]-record2[i]); } return sqrt(dist); } /** / calculateCentroidMeans() Calculates mean values for each centroid element / / Each thread sums a single attribute for each record / belonging to the cluster for that thread. */ __global__ void calculateCentroidMeans(double* centroids, size_t centroidsPitch, int k, double* records, size_t recordsPitch, int rows, int cols){ extern __shared__ double s_centroids[]; int i; s_centroids[threadIdx.y*cols+threadIdx.x] = 0.0; for(i = 0; i < rows; i++){ int currentCentroid = records[i*recordsPitch]; if(currentCentroid == threadIdx.y){ if(threadIdx.x > 0) s_centroids[threadIdx.y*cols+threadIdx.x] += records[i*recordsPitch+threadIdx.x]; else s_centroids[threadIdx.y*cols] += 1; } } __syncthreads(); if(threadIdx.x == 0){ centroids[threadIdx.y*centroidsPitch] = s_centroids[threadIdx.y*cols]; return; } __syncthreads(); if(s_centroids[threadIdx.y*cols] == 0 || isnan(s_centroids[threadIdx.y*cols]) || isnan(s_centroids[threadIdx.y*cols+threadIdx.x]) || s_centroids[threadIdx.y*cols+threadIdx.x]==0) centroids[threadIdx.y*centroidsPitch+threadIdx.x] = 0.0; else centroids[threadIdx.y*centroidsPitch+threadIdx.x] = s_centroids[threadIdx.y*cols+threadIdx.x]/s_centroids[threadIdx.y*cols]; } /** / findClosestClusters() Record blocks assign each record to closest cluster */ __global__ void findClosestClusters(double* centroids, size_t centroidsPitch, int k, double* records, size_t recordsPitch, int rows, int cols){ extern __shared__ double s_centroids[]; int idx = 0; int idy = blockIdx.y * blockDim.y + threadIdx.y; int sharedIdx = threadIdx.y*cols; int recordIdx = idy*recordsPitch; if(idx >= cols || idy >= rows) return; int closestCluster = 0; double closestDistance = euclideanDistance(&records[recordIdx], &centroids[closestCluster * centroidsPitch], cols); double thisDistance; int i; for(i = 0; i < k; i++){ thisDistance = euclideanDistance(&records[recordIdx], &centroids[i*centroidsPitch], cols); double diff = thisDistance - closestDistance; if(diff < 0){ closestDistance = thisDistance; closestCluster = i; } } records[idy * recordsPitch] = closestCluster; } /** / calculateSSE() Calculates the sum of squared errors of all records to assigned centroid */ __global__ void calculateSSE(double* centroids, size_t centroidsPitch, double* records, size_t recordsPitch, int rows, int cols, double *SSE){ int idy = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ double s_SSE[]; s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = 0; if(idy >= rows || threadIdx.x >= cols) return; int myCluster = records[idy * recordsPitch]; double myval = records[idy * recordsPitch + threadIdx.x+1]; double avg = centroids[myCluster*centroidsPitch+threadIdx.x+1]; if(isnan(myval)) s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = -avg; else s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = (myval-avg) * (myval-avg); __syncthreads(); if(threadIdx.x==0){ int
i; for(i = 1; i< blockDim.x; i++) s_SSE[threadIdx.y*blockDim.x] += s_SSE[threadIdx.y*blockDim.x+i]; } __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0){ int i; for(i = 0; i < blockDim.y; i++) s_SSE[0] += s_SSE[i*blockDim.x]; SSE[blockIdx.y] = s_SSE[0]; } }
03206872a52b84f0413d1dcb1cf21c2276663b6f.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <iostream> #include "clusterkernel.h" __device__ inline void atomicAddFloat(float* address, float value){ float old = value; float new_old; do { new_old = atomicExch(address, 0.0f); new_old += old; } while ((old = atomicExch(address, new_old))!=0.0f); }; /** / euclideanDistance() Calculates euclidean distance between two records */ __device__ double euclideanDistance(double *record1, double *record2, int cols){ double dist = 0.0f; int i; for(i = 1; i < cols; i++){ dist += (record1[i]-record2[i]) * (record1[i]-record2[i]); } return sqrt(dist); } /** / calculateCentroidMeans() Calculates mean values for each centroid element / / Each thread sums a single attribute for each record / belonging to the cluster for that thread. */ __global__ void calculateCentroidMeans(double* centroids, size_t centroidsPitch, int k, double* records, size_t recordsPitch, int rows, int cols){ extern __shared__ double s_centroids[]; int i; s_centroids[threadIdx.y*cols+threadIdx.x] = 0.0; for(i = 0; i < rows; i++){ int currentCentroid = records[i*recordsPitch]; if(currentCentroid == threadIdx.y){ if(threadIdx.x > 0) s_centroids[threadIdx.y*cols+threadIdx.x] += records[i*recordsPitch+threadIdx.x]; else s_centroids[threadIdx.y*cols] += 1; } } __syncthreads(); if(threadIdx.x == 0){ centroids[threadIdx.y*centroidsPitch] = s_centroids[threadIdx.y*cols]; return; } __syncthreads(); if(s_centroids[threadIdx.y*cols] == 0 || isnan(s_centroids[threadIdx.y*cols]) || isnan(s_centroids[threadIdx.y*cols+threadIdx.x]) || s_centroids[threadIdx.y*cols+threadIdx.x]==0) centroids[threadIdx.y*centroidsPitch+threadIdx.x] = 0.0; else centroids[threadIdx.y*centroidsPitch+threadIdx.x] = s_centroids[threadIdx.y*cols+threadIdx.x]/s_centroids[threadIdx.y*cols]; } /** / findClosestClusters() Record blocks assign each record to closest cluster */ __global__ void findClosestClusters(double* centroids, size_t centroidsPitch, int k, double* records, size_t recordsPitch, int rows, int cols){ extern __shared__ double s_centroids[]; int idx = 0; int idy = blockIdx.y * blockDim.y + threadIdx.y; int sharedIdx = threadIdx.y*cols; int recordIdx = idy*recordsPitch; if(idx >= cols || idy >= rows) return; int closestCluster = 0; double closestDistance = euclideanDistance(&records[recordIdx], &centroids[closestCluster * centroidsPitch], cols); double thisDistance; int i; for(i = 0; i < k; i++){ thisDistance = euclideanDistance(&records[recordIdx], &centroids[i*centroidsPitch], cols); double diff = thisDistance - closestDistance; if(diff < 0){ closestDistance = thisDistance; closestCluster = i; } } records[idy * recordsPitch] = closestCluster; } /** / calculateSSE() Calculates the sum of squared errors of all records to assigned centroid */ __global__ void calculateSSE(double* centroids, size_t centroidsPitch, double* records, size_t recordsPitch, int rows, int cols, double *SSE){ int idy = blockIdx.y * blockDim.y + threadIdx.y; extern __shared__ double s_SSE[]; s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = 0; if(idy >= rows || threadIdx.x >= cols) return; int myCluster = records[idy * recordsPitch]; double myval = records[idy * recordsPitch + threadIdx.x+1]; double avg = centroids[myCluster*centroidsPitch+threadIdx.x+1]; if(isnan(myval)) s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = -avg; else s_SSE[threadIdx.y*blockDim.x+threadIdx.x] = (myval-avg) * (myval-avg); __syncthreads(); if(threadIdx.x==0){ int i; for(i = 1; i< blockDim.x; i++) s_SSE[threadIdx.y*blockDim.x] +=
s_SSE[threadIdx.y*blockDim.x+i]; } __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0){ int i; for(i = 0; i < blockDim.y; i++) s_SSE[0] += s_SSE[i*blockDim.x]; SSE[blockIdx.y] = s_SSE[0]; } }
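The header clusterkernel.h and the host driver are not shown. A hedged sketch of the Lloyd-style step these kernels imply, assuming pitches are given in elements of double, column 0 of each record holds the cluster id, and k*cols fits in one block (at most 1024 threads):

void kmeans_step(double* d_centroids, size_t centroidsPitch, int k,
                 double* d_records, size_t recordsPitch, int rows, int cols) {
  // one thread per record row; findClosestClusters never touches its
  // declared shared buffer, so no dynamic shared memory is requested
  dim3 assignBlock(1, 256);
  dim3 assignGrid(1, (rows + assignBlock.y - 1) / assignBlock.y);
  findClosestClusters<<<assignGrid, assignBlock>>>(
      d_centroids, centroidsPitch, k, d_records, recordsPitch, rows, cols);
  // one thread per (attribute, cluster) pair; the shared buffer holds k*cols running sums
  dim3 meanBlock(cols, k);
  calculateCentroidMeans<<<1, meanBlock, k * cols * sizeof(double)>>>(
      d_centroids, centroidsPitch, k, d_records, recordsPitch, rows, cols);
}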
c4a51675e89e56ab806b2c7e633de8b9a81efd0f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <limits> #include <sys/time.h> #define NUM_PARTICLES 100000 #define NUM_ITERATIONS 1000 #define BLOCK_SIZE 256 #define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec) #define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0 #define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0 typedef struct { float3 position; float3 velocity; } Particle; void cpu_timestep(Particle *particles, const float dt) { for (unsigned int i = 0; i < NUM_PARTICLES; i++) { particles[i].position.x += particles[i].velocity.x * dt; particles[i].position.y += particles[i].velocity.y * dt; particles[i].position.z += particles[i].velocity.z * dt; } } __global__ void gpu_timestep(Particle *particles, const float dt) { const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NUM_PARTICLES) { particles[i].position.x += particles[i].velocity.x * dt; particles[i].position.y += particles[i].velocity.y * dt; particles[i].position.z += particles[i].velocity.z * dt; } } int main(int argc, char **argv) { struct timeval start, end; const float dt = 1.0; // Initialize CPU data. Particle *cpu_particles = (Particle *)malloc(NUM_PARTICLES * sizeof(Particle)); for (unsigned int i = 0; i < NUM_PARTICLES; i++) { cpu_particles[i].position.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].position.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].position.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); } // Initialize GPU data. Particle *gpu_particles; hipMalloc(&gpu_particles, NUM_PARTICLES * sizeof(Particle)); // Run CPU simulation. printf("Running simulation on the CPU... "); gettimeofday(&start, NULL); for (unsigned int i = 0; i < NUM_ITERATIONS; i++) { cpu_timestep(cpu_particles, dt); } gettimeofday(&end, NULL); printf("Done! Took %lfs.\n", SECONDS(start, end)); // Run GPU simulation. printf("Running simulation on the GPU... "); gettimeofday(&start, NULL); hipMemcpy(gpu_particles, cpu_particles, NUM_PARTICLES * sizeof(Particle), hipMemcpyHostToDevice); for (unsigned int i = 0; i < NUM_ITERATIONS; i++) { hipLaunchKernelGGL(( gpu_timestep), dim3((NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, gpu_particles, dt); hipDeviceSynchronize(); } hipMemcpy(cpu_particles, gpu_particles, NUM_PARTICLES * sizeof(Particle), hipMemcpyDeviceToHost); // Copy anywhere. gettimeofday(&end, NULL); printf("Done! Took %lfs.\n", SECONDS(start, end)); // Free resources. free(cpu_particles); hipFree(gpu_particles); return 0; }
c4a51675e89e56ab806b2c7e633de8b9a81efd0f.cu
#include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <limits> #include <sys/time.h> #define NUM_PARTICLES 100000 #define NUM_ITERATIONS 1000 #define BLOCK_SIZE 256 #define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec) #define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0 #define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0 typedef struct { float3 position; float3 velocity; } Particle; void cpu_timestep(Particle *particles, const float dt) { for (unsigned int i = 0; i < NUM_PARTICLES; i++) { particles[i].position.x += particles[i].velocity.x * dt; particles[i].position.y += particles[i].velocity.y * dt; particles[i].position.z += particles[i].velocity.z * dt; } } __global__ void gpu_timestep(Particle *particles, const float dt) { const unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < NUM_PARTICLES) { particles[i].position.x += particles[i].velocity.x * dt; particles[i].position.y += particles[i].velocity.y * dt; particles[i].position.z += particles[i].velocity.z * dt; } } int main(int argc, char **argv) { struct timeval start, end; const float dt = 1.0; // Initialize CPU data. Particle *cpu_particles = (Particle *)malloc(NUM_PARTICLES * sizeof(Particle)); for (unsigned int i = 0; i < NUM_PARTICLES; i++) { cpu_particles[i].position.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].position.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].position.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.x = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.y = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); cpu_particles[i].velocity.z = static_cast<float>(rand()) / static_cast<float>(RAND_MAX); } // Initialize GPU data. Particle *gpu_particles; cudaMalloc(&gpu_particles, NUM_PARTICLES * sizeof(Particle)); // Run CPU simulation. printf("Running simulation on the CPU... "); gettimeofday(&start, NULL); for (unsigned int i = 0; i < NUM_ITERATIONS; i++) { cpu_timestep(cpu_particles, dt); } gettimeofday(&end, NULL); printf("Done! Took %lfs.\n", SECONDS(start, end)); // Run GPU simulation. printf("Running simulation on the GPU... "); gettimeofday(&start, NULL); cudaMemcpy(gpu_particles, cpu_particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyHostToDevice); for (unsigned int i = 0; i < NUM_ITERATIONS; i++) { gpu_timestep<<<(NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(gpu_particles, dt); cudaDeviceSynchronize(); } cudaMemcpy(cpu_particles, gpu_particles, NUM_PARTICLES * sizeof(Particle), cudaMemcpyDeviceToHost); // Copy anywhere. gettimeofday(&end, NULL); printf("Done! Took %lfs.\n", SECONDS(start, end)); // Free resources. free(cpu_particles); cudaFree(gpu_particles); return 0; }
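Since kernels launched on the same stream already execute in order, the cudaDeviceSynchronize inside the loop is only needed so the final wall-clock reading is meaningful, and gettimeofday inevitably includes the two bulk copies. To time the kernels alone, CUDA events are the usual tool. A self-contained sketch (the launch_work callback is hypothetical and stands in for the NUM_ITERATIONS loop above):

#include <cuda_runtime.h>

// Returns the elapsed GPU time in milliseconds for whatever work the
// callback enqueues on the default stream.
float time_gpu_ms(void (*launch_work)(void)) {
  cudaEvent_t t0, t1;
  cudaEventCreate(&t0);
  cudaEventCreate(&t1);
  cudaEventRecord(t0);      // marker before the work
  launch_work();            // e.g. the gpu_timestep loop, without per-step syncs
  cudaEventRecord(t1);      // marker after the work
  cudaEventSynchronize(t1); // block until the last kernel has finished
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, t0, t1);
  cudaEventDestroy(t0);
  cudaEventDestroy(t1);
  return ms;
}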
c171e23615d1e960fec0bace1703d4ba3efe094a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // Comment out this line to enable debug mode #define NDEBUG /* time stamp function in milliseconds */ __host__ double getTimeStamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_usec / 1000000 + tv.tv_sec; } __host__ void initB(float *B, int nB) { for (int i = 0; i < nB; i++) { int iIndex = i * nB * nB; for (int j = 0; j < nB; j++) { int ijIndex = iIndex + j * nB; for (int k = 0; k < nB; k++) { int ijkIndex = ijIndex + k; if (i == 0 || j == 0 || k == 0) { B[ijkIndex] = 0; } else { B[ijkIndex] = ((i - 1 + j - 1 + k - 1) % 10) * (float)1.1; } } } } } #define h_getB(B, nB, i, j, k) B[((i) + 1) * nB * nB + ((j) + 1) * nB + ((k) + 1)] __host__ void jacobiRelaxationReference(float *A, float *B, int n) { int nB = n + 1; for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; if (i >= n - 1 || j >= n - 1 || k >= n - 1) { A[ijkIndex] = 0.0; } else { A[ijkIndex] = (float)0.8 * (h_getB(B, nB, i - 1, j, k) + h_getB(B, nB, i + 1, j, k) + h_getB(B, nB, i, j - 1, k) + h_getB(B, nB, i, j + 1, k) + h_getB(B, nB, i, j, k - 1) + h_getB(B, nB, i, j, k + 1)); } } } } } __host__ int checkA(float *Expected, float *Actual, int n) { for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; if (Expected[ijkIndex] != Actual[ijkIndex]) { #ifndef NDEBUG printf("(i=%d, j=%d, k=%d) Expected=%f Actual=%f\n", i, j, k, Expected[ijkIndex], Actual[ijkIndex]); #endif return 0; } } } } return 1; } __host__ double sumA(float *A, int n) { double sum = 0; for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; sum += A[ijkIndex] * (((i + j + k) % 10) ? 
1 : -1); } } } return sum; } __global__ void jacobiRelaxation(float *A, float *B, int n, int startingI) { extern __shared__ float s_data[]; /* Global Index */ int globalK = blockDim.x * blockIdx.x + threadIdx.x; int globalJ = blockDim.y * blockIdx.y + threadIdx.y; int globalI = blockDim.z * blockIdx.z + threadIdx.z + startingI; int globalIdx = globalI * n * n + globalJ * n + globalK; int nB = n + 1; int sizePerGlobalBI = nB * nB; int sizePerGlobalBJ = nB; int globalBIIndex = (globalI + 1) * sizePerGlobalBI; int globalBIJIndex = globalBIIndex + (globalJ + 1) * sizePerGlobalBJ; int globalBIdx = globalBIJIndex + (globalK + 1); if (globalK >= n || globalJ >= n || globalI >= n) { return; } /* Local Index */ // int sizeI = blockDim.z + 2; int sizeJ = blockDim.y + 2; int sizeK = blockDim.x + 2; int sizePerLocalI = sizeJ * sizeK; int sizePerLocalJ = sizeK; int localIIndex = (threadIdx.z + 1) * sizePerLocalI; int localIJIndex = localIIndex + (threadIdx.y + 1) * sizePerLocalJ; int localIdx = localIJIndex + (threadIdx.x + 1); s_data[localIdx] = B[globalBIdx]; if (threadIdx.z == 0) { s_data[localIdx - sizePerLocalI] = B[globalBIdx - sizePerGlobalBI]; s_data[localIdx + blockDim.z * sizePerLocalI] = B[globalBIdx + blockDim.z * sizePerGlobalBI]; } if (threadIdx.y == 0) { s_data[localIdx - sizePerLocalJ] = B[globalBIdx - sizePerGlobalBJ]; s_data[localIdx + blockDim.y * sizePerLocalJ] = B[globalBIdx + blockDim.y * sizePerGlobalBJ]; } if (threadIdx.x == 0) { s_data[localIdx - 1] = B[globalBIdx - 1]; s_data[localIdx + blockDim.x] = B[globalBIdx + blockDim.x]; } __syncthreads(); if (globalK == n - 1 || globalJ == n - 1 || globalI == n - 1) { A[globalIdx] = 0; } else { A[globalIdx] = (float)0.8 * (s_data[localIdx - sizePerLocalI] + s_data[localIdx + sizePerLocalI] + s_data[localIdx - sizePerLocalJ] + s_data[localIdx + sizePerLocalJ] + s_data[localIdx - 1] + s_data[localIdx + 1]); } } int main(int argc, char *argv[]) { int error = 0; /* Get Dimension */ if (argc != 2) { printf("Error: The number of arguments is not exactly 1\n"); return 0; } int n = atoi(argv[1]); size_t numElem = n * n * n; size_t numBytes = numElem * sizeof(float); int nB = n + 1; size_t numElemB = nB * nB * nB; size_t numBytesB = numElemB * sizeof(float); #ifndef NDEBUG printf("n=%d, numElem=%ld, numBytes=%ld\n", n, numElem, numBytes); printf("nB=%d, numElemB=%ld, numBytesB=%ld\n", nB, numElemB, numBytesB); #endif /* Allocate Host Memory */ float *h_B = NULL; error = error || hipHostMalloc((void **)&h_B, numBytesB, 0); #ifndef NDEBUG float *h_hA = (float *)malloc(numBytes); #endif float *h_dA = NULL; error = error || hipHostMalloc((void **)&h_dA, numBytes, 0); if (error) { printf("Error: hipHostMalloc returns error\n"); return 0; } /* Initialize Host Memory */ initB(h_B, nB); #ifndef NDEBUG double timestampPreCpuKernel = getTimeStamp(); jacobiRelaxationReference(h_hA, h_B, n); double timestampPostCpuKernel = getTimeStamp(); printf("CPU: %lf %ld\n", sumA(h_hA, n), (long)ceil(1000*(timestampPostCpuKernel - timestampPreCpuKernel))); #endif /* Allocate Device Memory */ float *d_B = NULL; error = error || hipMalloc((void **)&d_B, numBytesB); float *d_A = NULL; error = error || hipMalloc((void **)&d_A, numBytes); if (error) { printf("Error: hipMalloc returns error\n"); return 0; } /* Configuration */ #define NUM_STREAM 2 int nIStreams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { nIStreams[i] = n / NUM_STREAM; } nIStreams[NUM_STREAM - 1] += n % NUM_STREAM; dim3 d_blockDim; d_blockDim.x = 32; d_blockDim.y = 32; d_blockDim.z = 1; // must be 1 
dim3 d_gridDimStreams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { d_gridDimStreams[i].x = (n - 1) / d_blockDim.x + 1; d_gridDimStreams[i].y = (n - 1) / d_blockDim.y + 1; d_gridDimStreams[i].z = (nIStreams[i] - 1) / d_blockDim.z + 1; } /* Create NUM_STREAM Streams */ hipStream_t d_streams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { error = error || hipStreamCreate(&d_streams[i]); } if (error) { printf("Error: hipStreamCreate returns error\n"); return 0; } // TIMER BEGIN /* Copy Host Memory to Device Memory */ double timestampPreCpuGpuTransfer = getTimeStamp(); size_t numElemBStream1 = 0; if (NUM_STREAM != 1) { numElemBStream1 = (nIStreams[0] + 1 + 1) * nB * nB; } else { numElemBStream1 = (nIStreams[0] + 1) * nB * nB; } error = error || hipMemcpyAsync(d_B, h_B, numElemBStream1 * sizeof(float), hipMemcpyHostToDevice, d_streams[0]); if (NUM_STREAM != 1) { hipStreamSynchronize(d_streams[0]); } int numElemBStreams = numElemBStream1; for (int i = 1; i < NUM_STREAM; i++) { int nBIStreami = nIStreams[i]; size_t numElemBStreami = ((i == NUM_STREAM - 1) ? nBIStreami - 1 : nBIStreami) * nB * nB; error = error || hipMemcpyAsync(d_B + numElemBStreams, h_B + numElemBStreams, numElemBStreami * sizeof(float), hipMemcpyHostToDevice, d_streams[i]); numElemBStreams += numElemBStreami; if (i != NUM_STREAM - 1) { // Synchronize between hipMemcpyAsync hipStreamSynchronize(d_streams[i]); } } if (numElemBStreams != numElemB) { printf("Error: hipMemcpyAsync does not cover entire B (%ld != %ld)\n", numElemBStreams, numElemB); return 0; } if (error) { printf("Error: hipMemcpyAsync B returns error %d\n", error); return 0; } /* Run Kernel */ int d_smemNumElem = (d_blockDim.x + 2) * (d_blockDim.y + 2) * (d_blockDim.z + 2); size_t d_smemNumBytes = d_smemNumElem * sizeof(float); size_t d_startingI = 0; for (int i = 0; i < NUM_STREAM; i++) { hipLaunchKernelGGL(( jacobiRelaxation), dim3(d_gridDimStreams[i]), dim3(d_blockDim), d_smemNumBytes, d_streams[i], d_A, d_B, n, d_startingI); d_startingI += nIStreams[i]; } /* Copy Device Memory to Host Memory */ size_t numElemAStreams = 0; for (int i = 0; i < NUM_STREAM; i++) { size_t numElemAStreami = nIStreams[i] * n * n; error = error || hipMemcpyAsync(h_dA + numElemAStreams, d_A + numElemAStreams, numElemAStreami * sizeof(float), hipMemcpyDeviceToHost, d_streams[i]); numElemAStreams += numElemAStreami; } if (numElemAStreams != numElem) { printf("Error: hipMemcpyAsync does not cover entire A\n"); return 0; } if (error) { printf("Error: hipMemcpyAsync A returns error %d\n", error); return 0; } /* Synchronize Streams */ for (int i = 0; i < NUM_STREAM; i++) { hipStreamSynchronize(d_streams[i]); } double timestampPostGpuCpuTransfer = getTimeStamp(); // TIMER END /* Free Device Memory */ hipFree(d_A); d_A = NULL; hipFree(d_B); d_B = NULL; /* Output */ double aValue = sumA(h_dA, n); long totalGpuElapsed = (long)ceil(1000*(timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer)); printf("%lf %ld\n", aValue, totalGpuElapsed); #ifndef NDEBUG for (int i = 0; i < NUM_STREAM; i++) { printf("d_gridDimStream%d=(%d, %d, %d), d_blockDim=(%d, %d, %d), d_smemNumBytes=%ld\n", i, d_gridDimStreams[i].x, d_gridDimStreams[i].y, d_gridDimStreams[i].z, d_blockDim.x, d_blockDim.y, d_blockDim.z, d_smemNumBytes); } /* Verify Device Result with Host Result */ error = error || !checkA(h_hA, h_dA, n); if(error) { printf("Error: GPU result does not match CPU result\n"); } #endif /* Free Host Memory */ hipHostFree(h_dA); h_dA = NULL; #ifndef NDEBUG free(h_hA); h_hA = NULL; #endif
hipHostFree(h_B); h_B = NULL; /* Clean Up Device Resource */ hipDeviceReset(); }
c171e23615d1e960fec0bace1703d4ba3efe094a.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <sys/time.h> // Comment out this line to enable debug mode #define NDEBUG /* time stamp function in milliseconds */ __host__ double getTimeStamp() { struct timeval tv; gettimeofday(&tv, NULL); return (double)tv.tv_usec / 1000000 + tv.tv_sec; } __host__ void initB(float *B, int nB) { for (int i = 0; i < nB; i++) { int iIndex = i * nB * nB; for (int j = 0; j < nB; j++) { int ijIndex = iIndex + j * nB; for (int k = 0; k < nB; k++) { int ijkIndex = ijIndex + k; if (i == 0 || j == 0 || k == 0) { B[ijkIndex] = 0; } else { B[ijkIndex] = ((i - 1 + j - 1 + k - 1) % 10) * (float)1.1; } } } } } #define h_getB(B, nB, i, j, k) B[((i) + 1) * nB * nB + ((j) + 1) * nB + ((k) + 1)] __host__ void jacobiRelaxationReference(float *A, float *B, int n) { int nB = n + 1; for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; if (i >= n - 1 || j >= n - 1 || k >= n - 1) { A[ijkIndex] = 0.0; } else { A[ijkIndex] = (float)0.8 * (h_getB(B, nB, i - 1, j, k) + h_getB(B, nB, i + 1, j, k) + h_getB(B, nB, i, j - 1, k) + h_getB(B, nB, i, j + 1, k) + h_getB(B, nB, i, j, k - 1) + h_getB(B, nB, i, j, k + 1)); } } } } } __host__ int checkA(float *Expected, float *Actual, int n) { for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; if (Expected[ijkIndex] != Actual[ijkIndex]) { #ifndef NDEBUG printf("(i=%d, j=%d, k=%d) Expected=%f Actual=%f\n", i, j, k, Expected[ijkIndex], Actual[ijkIndex]); #endif return 0; } } } } return 1; } __host__ double sumA(float *A, int n) { double sum = 0; for (int i = 0; i < n; i++) { int iIndex = i * n * n; for (int j = 0; j < n; j++) { int ijIndex = iIndex + j * n; for (int k = 0; k < n; k++) { int ijkIndex = ijIndex + k; sum += A[ijkIndex] * (((i + j + k) % 10) ? 
1 : -1); } } } return sum; } __global__ void jacobiRelaxation(float *A, float *B, int n, int startingI) { extern __shared__ float s_data[]; /* Global Index */ int globalK = blockDim.x * blockIdx.x + threadIdx.x; int globalJ = blockDim.y * blockIdx.y + threadIdx.y; int globalI = blockDim.z * blockIdx.z + threadIdx.z + startingI; int globalIdx = globalI * n * n + globalJ * n + globalK; int nB = n + 1; int sizePerGlobalBI = nB * nB; int sizePerGlobalBJ = nB; int globalBIIndex = (globalI + 1) * sizePerGlobalBI; int globalBIJIndex = globalBIIndex + (globalJ + 1) * sizePerGlobalBJ; int globalBIdx = globalBIJIndex + (globalK + 1); if (globalK >= n || globalJ >= n || globalI >= n) { return; } /* Local Index */ // int sizeI = blockDim.z + 2; int sizeJ = blockDim.y + 2; int sizeK = blockDim.x + 2; int sizePerLocalI = sizeJ * sizeK; int sizePerLocalJ = sizeK; int localIIndex = (threadIdx.z + 1) * sizePerLocalI; int localIJIndex = localIIndex + (threadIdx.y + 1) * sizePerLocalJ; int localIdx = localIJIndex + (threadIdx.x + 1); s_data[localIdx] = B[globalBIdx]; if (threadIdx.z == 0) { s_data[localIdx - sizePerLocalI] = B[globalBIdx - sizePerGlobalBI]; s_data[localIdx + blockDim.z * sizePerLocalI] = B[globalBIdx + blockDim.z * sizePerGlobalBI]; } if (threadIdx.y == 0) { s_data[localIdx - sizePerLocalJ] = B[globalBIdx - sizePerGlobalBJ]; s_data[localIdx + blockDim.y * sizePerLocalJ] = B[globalBIdx + blockDim.y * sizePerGlobalBJ]; } if (threadIdx.x == 0) { s_data[localIdx - 1] = B[globalBIdx - 1]; s_data[localIdx + blockDim.x] = B[globalBIdx + blockDim.x]; } __syncthreads(); if (globalK == n - 1 || globalJ == n - 1 || globalI == n - 1) { A[globalIdx] = 0; } else { A[globalIdx] = (float)0.8 * (s_data[localIdx - sizePerLocalI] + s_data[localIdx + sizePerLocalI] + s_data[localIdx - sizePerLocalJ] + s_data[localIdx + sizePerLocalJ] + s_data[localIdx - 1] + s_data[localIdx + 1]); } } int main(int argc, char *argv[]) { int error = 0; /* Get Dimension */ if (argc != 2) { printf("Error: The number of arguments is not exactly 1\n"); return 0; } int n = atoi(argv[1]); size_t numElem = n * n * n; size_t numBytes = numElem * sizeof(float); int nB = n + 1; size_t numElemB = nB * nB * nB; size_t numBytesB = numElemB * sizeof(float); #ifndef NDEBUG printf("n=%d, numElem=%ld, numBytes=%ld\n", n, numElem, numBytes); printf("nB=%d, numElemB=%ld, numBytesB=%ld\n", nB, numElemB, numBytesB); #endif /* Allocate Host Memory */ float *h_B = NULL; error = error || cudaHostAlloc((void **)&h_B, numBytesB, 0); #ifndef NDEBUG float *h_hA = (float *)malloc(numBytes); #endif float *h_dA = NULL; error = error || cudaHostAlloc((void **)&h_dA, numBytes, 0); if (error) { printf("Error: cudaHostAlloc returns error\n"); return 0; } /* Initialize Host Memory */ initB(h_B, nB); #ifndef NDEBUG double timestampPreCpuKernel = getTimeStamp(); jacobiRelaxationReference(h_hA, h_B, n); double timestampPostCpuKernel = getTimeStamp(); printf("CPU: %lf %ld\n", sumA(h_hA, n), (long)ceil(1000*(timestampPostCpuKernel - timestampPreCpuKernel))); #endif /* Allocate Device Memory */ float *d_B = NULL; error = error || cudaMalloc((void **)&d_B, numBytesB); float *d_A = NULL; error = error || cudaMalloc((void **)&d_A, numBytes); if (error) { printf("Error: cudaMalloc returns error\n"); return 0; } /* Configuration */ #define NUM_STREAM 2 int nIStreams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { nIStreams[i] = n / NUM_STREAM; } nIStreams[NUM_STREAM - 1] += n % NUM_STREAM; dim3 d_blockDim; d_blockDim.x = 32; d_blockDim.y = 32; d_blockDim.z = 1; // must be 
1 dim3 d_gridDimStreams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { d_gridDimStreams[i].x = (n - 1) / d_blockDim.x + 1; d_gridDimStreams[i].y = (n - 1) / d_blockDim.y + 1; d_gridDimStreams[i].z = (nIStreams[i] - 1) / d_blockDim.z + 1; } /* Create NUM_STREAM Streams */ cudaStream_t d_streams[NUM_STREAM]; for (int i = 0; i < NUM_STREAM; i++) { error = error || cudaStreamCreate(&d_streams[i]); } if (error) { printf("Error: cudaStreamCreate returns error\n"); return 0; } // TIMER BEGIN /* Copy Host Memory to Device Memory */ double timestampPreCpuGpuTransfer = getTimeStamp(); size_t numElemBStream1 = 0; if (NUM_STREAM != 1) { numElemBStream1 = (nIStreams[0] + 1 + 1) * nB * nB; } else { numElemBStream1 = (nIStreams[0] + 1) * nB * nB; } error = error || cudaMemcpyAsync(d_B, h_B, numElemBStream1 * sizeof(float), cudaMemcpyHostToDevice, d_streams[0]); if (NUM_STREAM != 1) { cudaStreamSynchronize(d_streams[0]); } int numElemBStreams = numElemBStream1; for (int i = 1; i < NUM_STREAM; i++) { int nBIStreami = nIStreams[i]; size_t numElemBStreami = ((i == NUM_STREAM - 1) ? nBIStreami - 1 : nBIStreami) * nB * nB; error = error || cudaMemcpyAsync(d_B + numElemBStreams, h_B + numElemBStreams, numElemBStreami * sizeof(float), cudaMemcpyHostToDevice, d_streams[i]); numElemBStreams += numElemBStreami; if (i != NUM_STREAM - 1) { // Synchronize between cudaMemcpyAsync cudaStreamSynchronize(d_streams[i]); } } if (numElemBStreams != numElemB) { printf("Error: cudaMemcpyAsync does not cover entire B (%ld != %ld)\n", numElemBStreams, numElemB); return 0; } if (error) { printf("Error: cudaMemcpyAsync B returns error %d\n", error); return 0; } /* Run Kernel */ int d_smemNumElem = (d_blockDim.x + 2) * (d_blockDim.y + 2) * (d_blockDim.z + 2); size_t d_smemNumBytes = d_smemNumElem * sizeof(float); size_t d_startingI = 0; for (int i = 0; i < NUM_STREAM; i++) { jacobiRelaxation<<<d_gridDimStreams[i], d_blockDim, d_smemNumBytes, d_streams[i]>>>(d_A, d_B, n, d_startingI); d_startingI += nIStreams[i]; } /* Copy Device Memory to Host Memory */ size_t numElemAStreams = 0; for (int i = 0; i < NUM_STREAM; i++) { size_t numElemAStreami = nIStreams[i] * n * n; error = error || cudaMemcpyAsync(h_dA + numElemAStreams, d_A + numElemAStreams, numElemAStreami * sizeof(float), cudaMemcpyDeviceToHost, d_streams[i]); numElemAStreams += numElemAStreami; } if (numElemAStreams != numElem) { printf("Error: cudaMemcpyAsync does not cover entire A\n"); return 0; } if (error) { printf("Error: cudaMemcpyAsync A returns error %d\n", error); return 0; } /* Synchronize Streams */ for (int i = 0; i < NUM_STREAM; i++) { cudaStreamSynchronize(d_streams[i]); } double timestampPostGpuCpuTransfer = getTimeStamp(); // TIMER END /* Free Device Memory */ cudaFree(d_A); d_A = NULL; cudaFree(d_B); d_B = NULL; /* Output */ double aValue = sumA(h_dA, n); long totalGpuElapsed = (long)ceil(1000*(timestampPostGpuCpuTransfer - timestampPreCpuGpuTransfer)); printf("%lf %ld\n", aValue, totalGpuElapsed); #ifndef NDEBUG for (int i = 0; i < NUM_STREAM; i++) { printf("d_gridDimStream%d=(%d, %d, %d), d_blockDim=(%d, %d, %d), d_smemNumBytes=%ld\n", i, d_gridDimStreams[i].x, d_gridDimStreams[i].y, d_gridDimStreams[i].z, d_blockDim.x, d_blockDim.y, d_blockDim.z, d_smemNumBytes); } /* Verify Device Result with Host Result */ error = error || !checkA(h_hA, h_dA, n); if(error) { printf("Error: GPU result does not match CPU result\n"); } #endif /* Free Host Memory */ cudaFreeHost(h_dA); h_dA = NULL; #ifndef NDEBUG free(h_hA); h_hA = NULL; #endif cudaFreeHost(h_B); h_B =
NULL; /* Clean Up Device Resource */ cudaDeviceReset(); }
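The per-stream copy sizes are the subtle part of this file: stream 0 transfers its slab of B plus the halo planes around it, interior streams transfer exactly their own planes, and the last stream transfers one plane fewer, so together the streams cover all nB planes of B exactly once. A small hypothetical host-side check of that bookkeeping:

#include <assert.h>

static void check_partition(int n, int num_stream) {
  int nB = n + 1;
  long planes = 0;
  for (int i = 0; i < num_stream; i++) {
    // same slab sizes as nIStreams[] above: n/num_stream each, remainder to the last
    int nI = n / num_stream + ((i == num_stream - 1) ? n % num_stream : 0);
    if (i == 0)                   planes += (num_stream == 1) ? nI + 1 : nI + 2;
    else if (i == num_stream - 1) planes += nI - 1;
    else                          planes += nI;
  }
  assert(planes == (long)nB); // every one of B's nB planes is copied exactly once
}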
3339ccb7046183f560b7f3cec2f65cc8dfb89660.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <cmath> #include "ATen/ATen.h" #include "ATen/hip/HIPContext.h" #include "ATen/hip/detail/IndexUtils.cuh" #include "ATen/TensorUtils.h" // #include "ATen/Type.h" #include "ATen/AccumulateType.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 template<typename T> __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } #include "type_shim.h" typedef enum{ ADAM_MODE_0 =0, // eps under square root ADAM_MODE_1 =1 // eps outside square root } adamMode_t; template <typename T, typename GRAD_T> __global__ void adam_cuda_kernel( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; for (int j = i; j < tsize; j+=totThreads) { T scaled_grad = g[j]/grad_scale; m[j] = b1*m[j] + (1-b1)*scaled_grad; v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; float update = (m[j]/denom) + (decay*p[j]); p[j] = p[j] - (step_size*update); if (p_copy != NULL) p_copy[j] = (GRAD_T) p[j]; } } template <int DEPTH, typename T, typename GRAD_T> struct AdamFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<DEPTH>& tl, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, adamMode_t mode, const float decay) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* p = (T *)tl.addresses[0][tensor_loc]; p += chunk_idx*chunk_size; T* m = (T *)tl.addresses[1][tensor_loc]; m += chunk_idx*chunk_size; T* v = (T *)tl.addresses[2][tensor_loc]; v += chunk_idx*chunk_size; GRAD_T* g = (GRAD_T *)tl.addresses[3][tensor_loc]; g += chunk_idx*chunk_size; GRAD_T* p_copy = NULL; if (DEPTH == 5) { p_copy = (GRAD_T *)tl.addresses[4][tensor_loc]; p_copy += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; T incoming_p[ILP]; T incoming_m[ILP]; T incoming_v[ILP]; T incoming_g[ILP]; // to make things simple, we put aligned case in a different code path if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(p) && is_aligned(m) && is_aligned(v) && is_aligned(g) && is_aligned(p_copy)) { for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load GRAD_T tmp_g[ILP]; load_store(incoming_p, p, 0, i_start); load_store(incoming_m, m, 0, i_start); load_store(incoming_v, v, 0, i_start); load_store(tmp_g, g, 0, i_start); #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_g[ii] = 
static_cast<T>(tmp_g[ii]); T scaled_grad = incoming_g[ii]/grad_scale; incoming_m[ii] = b1*incoming_m[ii] + (1-b1)*scaled_grad; incoming_v[ii] = b2*incoming_v[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(incoming_v[ii] + eps); else // Mode 1 denom = sqrtf(incoming_v[ii]) + eps; float update = (incoming_m[ii]/denom) + (decay*incoming_p[ii]); incoming_p[ii] = incoming_p[ii] - (step_size*update); if (DEPTH == 5) tmp_g[ii] = static_cast<GRAD_T>(incoming_p[ii]); } load_store(p, incoming_p, i_start, 0); load_store(m, incoming_m, i_start, 0); load_store(v, incoming_v, i_start, 0); if (DEPTH == 5) load_store(p_copy, tmp_g, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_p[ii] = 0; incoming_m[ii] = 0; incoming_v[ii] = 0; incoming_g[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if (i < n && i < chunk_size) { incoming_p[ii] = p[i]; incoming_m[ii] = m[i]; incoming_v[ii] = v[i]; incoming_g[ii] = static_cast<T>(g[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. #pragma unroll for(int ii = 0; ii < ILP; ii++) { int j = i_start + threadIdx.x + ii*blockDim.x; if(j < n && j < chunk_size) { T scaled_grad = incoming_g[ii]/grad_scale; m[j] = b1*incoming_m[ii] + (1-b1)*scaled_grad; v[j] = b2*incoming_v[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; float update = (m[j]/denom) + (decay*incoming_p[ii]); p[j] = incoming_p[ii] - (step_size*update); if (DEPTH == 5) p_copy[j] = (GRAD_T) p[j]; } } } } } }; void fused_adam_cuda( at::Tensor & p, at::Tensor & p_copy, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { // using namespace at; //Get tensor size int tsize = p.numel(); //Determine #threads and #blocks const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - ::pow(beta1, step); const float bias_correction2 = 1 - ::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( adam_cuda_kernel<accscalar_t, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, p.DATA_PTR<accscalar_t>(), p_copy.numel() ? 
p_copy.DATA_PTR<scalar_t_0>() : NULL, m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", hipLaunchKernelGGL(( adam_cuda_kernel<scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, p.DATA_PTR<scalar_t_0>(), NULL, //don't output p_copy for fp32, it's wasted write
m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_HIP_CHECK(hipGetLastError()); }
void fused_adam_cuda_mt( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, // p, m, v, g, p_copy
float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { //Constants
float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - ::pow(beta1, step); const float bias_correction2 = 1 - ::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); size_t tl_sz = tensor_lists.size(); TORCH_CHECK(tl_sz == 4 || tl_sz == 5, "expected tensor lists of size 4 or 5"); if (tensor_lists[3][0].scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients
TORCH_CHECK(tensor_lists[0][0].scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type
if (tl_sz == 5) { DISPATCH_FLOAT_AND_HALF(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; multi_tensor_apply<5>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<5, accscalar_t, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } else { DISPATCH_FLOAT_AND_HALF(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<4, accscalar_t, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } } else { if (tl_sz == 5) { DISPATCH_DOUBLE_AND_FLOAT(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", multi_tensor_apply<5>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<5, scalar_t_0, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } else { DISPATCH_DOUBLE_AND_FLOAT(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<4, scalar_t_0, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } } C10_HIP_CHECK(hipGetLastError()); }
template <typename FROM_T, typename TO_T> __device__ void convert(const FROM_T vi, TO_T& vo) { vo = static_cast<TO_T>(vi); }
template <> __device__ void convert(const float vi, uint8_t& vo) { union S { float as_float; int as_int; }; S s; s.as_float = vi; s.as_int = s.as_int & 0xFF800000; union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f); vo = t.as_byte[1]; }
template <> __device__ void convert(const uint8_t vi, float& vo) { union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_byte[0] = 0; t.as_byte[1] = vi; vo =
static_cast<float>(t.as_half); } template <> __device__ void convert(const at::Half vi, uint8_t& vo) { union S { float as_float; int as_int; }; S s; s.as_float = static_cast<float>(vi); s.as_int = s.as_int & 0xFF800000; union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f); vo = t.as_byte[1]; } template <> __device__ void convert(const uint8_t vi, at::Half& vo) { union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_byte[0] = 0; t.as_byte[1] = vi; vo = t.as_half; } template <typename GRAD_T> __global__ void strided_check_finite_cuda_kernel( volatile int* noop_gmem, GRAD_T* __restrict__ p_copy, const size_t tsize, int stride, int clear_overflow_first) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock) * stride; const int totThreads = gridDim.x*gridDim.y*threadsPerBlock*stride; if (clear_overflow_first) { if (i == 0) { *noop_gmem = 0; } __syncthreads(); } for (int j = i; j < tsize; j+=totThreads) { GRAD_T pi = p_copy[j]; if (!isfinite(pi)) { *noop_gmem = 1; } } } template <> __global__ void strided_check_finite_cuda_kernel( volatile int* noop_gmem, uint8_t* __restrict__ p_copy, const size_t tsize, int stride, int clear_overflow_first) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock) * stride; const int totThreads = gridDim.x*gridDim.y*threadsPerBlock*stride; if (clear_overflow_first) { if (i == 0) { *noop_gmem = 0; } __syncthreads(); } for (int j = i; j < tsize; j+=totThreads) { at::Half pi; convert(p_copy[j], pi); if (!isfinite(pi)) { *noop_gmem = 1; } } } template <typename FROM_T, typename TO_T> __global__ void maybe_cast_kernel( volatile int* overflow_flag, const FROM_T* p_in, TO_T* p_out, const size_t tsize) { if (overflow_flag && *overflow_flag != 0) return; //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; FROM_T pi[ILP]; TO_T po[ILP]; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { pi[ii] = 0; int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p_in[j]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { convert(pi[ii], po[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { p_out[j] = po[ii]; } } } } template <typename T, typename GRAD_T, typename REDU_T> __global__ void reversible_adam_cuda_kernel( T* __restrict__ p, REDU_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; 
const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; T mi[ILP]; T vi[ILP]; T pi[ILP]; T gi[ILP]; bool overflow = false; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { mi[ii] = T(0); vi[ii] = T(0); pi[ii] = T(0); gi[ii] = GRAD_T(0); int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p[j]; mi[ii] = m[j]; vi[ii] = v[j]; gi[ii] = static_cast<T>(g[j]); } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { T scaled_grad = gi[ii]/grad_scale; if (isfinite(scaled_grad)) { mi[ii] = b1*mi[ii] + (1-b1)*scaled_grad; vi[ii] = b2*vi[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vi[ii] + eps); else // Mode 1
denom = sqrtf(vi[ii]) + eps; float update = (mi[ii]/denom) + (decay*pi[ii]); pi[ii] = pi[ii] - (step_size*update); } else { overflow = true; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { m[j] = mi[ii]; v[j] = vi[ii]; p[j] = pi[ii]; if (p_copy != NULL) { convert(pi[ii], p_copy[j]); } } } } if (p_copy != NULL) { __syncthreads(); if (overflow) { convert(float(INFINITY), p_copy[0]); } } }
template <typename T, typename GRAD_T> __global__ void maybe_adam_undo_cuda_kernel( volatile int* overflow_flag, T* __restrict__ p, T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { // NB! Skip undo kernel when overflow flag is NOT set
if (overflow_flag && *overflow_flag == 0) return; //Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; T mi[ILP]; T vi[ILP]; T pi[ILP]; T gi[ILP]; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { mi[ii] = T(0); vi[ii] = T(0); pi[ii] = T(0); gi[ii] = GRAD_T(0); int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p[j]; mi[ii] = m[j]; vi[ii] = v[j]; gi[ii] = static_cast<T>(g[j]); } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { T scaled_grad = gi[ii]/grad_scale; if (isfinite(scaled_grad)) { float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vi[ii] + eps); else // Mode 1
denom = sqrtf(vi[ii]) + eps; pi[ii] = (pi[ii] + step_size*(mi[ii]/denom)) / (1.0f - step_size*decay); mi[ii] = (mi[ii] - (1-b1)*scaled_grad) / b1; vi[ii] = (vi[ii] - (1-b2)*scaled_grad*scaled_grad) / b2; // Make sure round off errors don't create (small) negative value.
// This can happen if we have to revert the very first step.
vi[ii] = vi[ii] >= 0.0f ? vi[ii] : 0.0f; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { m[j] = mi[ii]; v[j] = vi[ii]; p[j] = pi[ii]; } } } }
template <int DEPTH, typename FROM_T, typename TO_T> struct MaybeCastFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* overflow_flag, TensorListMetadata<DEPTH>& tl) { if (overflow_flag && *overflow_flag != 0) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; FROM_T* p_in = (FROM_T *)tl.addresses[0][tensor_loc]; p_in += chunk_idx*chunk_size; TO_T* p_out = (TO_T *)tl.addresses[1][tensor_loc]; p_out += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; int dim = chunk_size < n ? chunk_size : n; FROM_T pi[ILP]; TO_T po[ILP]; for(int j_start = 0; j_start < dim; j_start+=blockDim.x*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { pi[ii] = FROM_T(0); int j = j_start + threadIdx.x + ii*blockDim.x; if (j < dim) { pi[ii] = p_in[j]; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { convert(pi[ii], po[ii]); }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + threadIdx.x + ii*blockDim.x; if (j < dim) { p_out[j] = po[ii]; } } } } };
void fused_strided_check_finite( at::Tensor & overflow_flag, at::Tensor & p_copy, int stride, int clear_overflow_first) { //Get tensor size
int tsize = p_copy.numel(); int niter = (tsize + stride - 1) / stride; //Determine #threads and #blocks
const int threadsPerBlock = 512; //In order to avoid race condition, blocks must be 1 when clear_overflow_first flag is set.
const dim3 blocks(clear_overflow_first ? 1 : (niter+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p_copy), "parameter tensor is too large to be indexed with int32"); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_HALF_AND_BYTE(p_copy.scalar_type(), 0, "check_finite_cuda_kernel", hipLaunchKernelGGL(( strided_check_finite_cuda_kernel<scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, overflow_flag.DATA_PTR<int>(), p_copy.DATA_PTR<scalar_t_0>(), tsize, stride, clear_overflow_first); ); C10_HIP_CHECK(hipGetLastError()); }
void fused_reversible_adam_cuda( at::Tensor & p, at::Tensor & p_copy, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { // using namespace at;
//Get tensor size
int tsize = p.numel(); //Determine #threads and #blocks
const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants
float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - ::pow(beta1, step); const float bias_correction2 = 1 - ::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients
TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type
using namespace at; // prevents "toString is undefined" errors
if (p_copy.numel() == 0 || p_copy.scalar_type() == g.scalar_type()) {
DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( reversible_adam_cuda_kernel<accscalar_t, scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, p.DATA_PTR<accscalar_t>(), p_copy.numel() ? p_copy.DATA_PTR<scalar_t_0>() : NULL, m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { TORCH_CHECK(p_copy.scalar_type() == at::ScalarType::Byte, "expected parameter to be of byte type"); DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_e5m2_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( reversible_adam_cuda_kernel<accscalar_t, scalar_t_0, uint8_t>), dim3(blocks),dim3(threadsPerBlock), 0, stream, p.DATA_PTR<accscalar_t>(), p_copy.DATA_PTR<uint8_t>(), m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", hipLaunchKernelGGL(( reversible_adam_cuda_kernel<scalar_t_0, scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, p.DATA_PTR<scalar_t_0>(), NULL, //don't output p_copy for fp32, it's wasted write
m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_HIP_CHECK(hipGetLastError()); }
void maybe_cast_cuda( at::Tensor & overflow_flag, at::Tensor & p_in, at::Tensor & p_out) { //Get tensor size
int tsize = p_in.numel(); TORCH_CHECK(tsize == p_out.numel(), "p_in.numel() must equal p_out.numel()"); //Determine #threads and #blocks
const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p_in), "parameter tensor is too large to be indexed with int32"); //Constants
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); DISPATCH_FLOAT_HALF_AND_BYTE(p_in.scalar_type(), 0, "maybe_cast_cuda", DISPATCH_FLOAT_HALF_AND_BYTE(p_out.scalar_type(), 1, "maybe_cast_cuda", hipLaunchKernelGGL(( maybe_cast_kernel<scalar_t_0,scalar_t_1>), dim3(blocks),dim3(threadsPerBlock), 0, stream, overflow_flag.numel() ?
overflow_flag.DATA_PTR<int>() : NULL, p_in.DATA_PTR<scalar_t_0>(), p_out.DATA_PTR<scalar_t_1>(), tsize); )) C10_HIP_CHECK(hipGetLastError()); } void maybe_cast_cuda_mt( int chunk_size, at::Tensor overflow_flag, std::vector<std::vector<at::Tensor>> tensor_lists) // p_in, p_out { //Constants hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); size_t tl_sz = tensor_lists.size(); TORCH_CHECK(tl_sz == 2, "expected tensor lists of size 2"); DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[0][0].scalar_type(), 0, "maybe_cast_cuda_mt_kernel", DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[1][0].scalar_type(), 1, "maybe_cast_cuda_mt_kernel", multi_tensor_apply<2>( BLOCK_SIZE, chunk_size, overflow_flag, tensor_lists, MaybeCastFunctor<2, scalar_t_0, scalar_t_1>()); )) C10_HIP_CHECK(hipGetLastError()); } void fused_maybe_adam_undo_cuda( at::Tensor & overflow_flag, at::Tensor & p, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { //Get tensor size int tsize = p.numel(); //Determine #threads and #blocks const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - ::pow(beta1, step); const float bias_correction2 = 1 - ::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; hipLaunchKernelGGL(( maybe_adam_undo_cuda_kernel<accscalar_t, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, overflow_flag.numel() ? overflow_flag.DATA_PTR<int>() : NULL, p.DATA_PTR<accscalar_t>(), m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", hipLaunchKernelGGL(( maybe_adam_undo_cuda_kernel<scalar_t_0, scalar_t_0>), dim3(blocks),dim3(threadsPerBlock), 0, stream, overflow_flag.numel() ? overflow_flag.DATA_PTR<int>() : NULL, p.DATA_PTR<scalar_t_0>(), m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_HIP_CHECK(hipGetLastError()); }
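// Note on the byte-typed p_copy path above (explanatory sketch, not part of the
// original source): the convert<>() specializations to and from uint8_t store a
// parameter as the high byte of its fp16 bit pattern, i.e. an e5m2-style 8-bit
// float with 1 sign bit, 5 exponent bits and 2 mantissa bits -- consistent with
// the "adam_cuda_e5m2_kernel" dispatch name. The 0xFF800000 mask keeps only the
// input's sign and exponent, so s.as_float is +/-2^e, and adding
// s.as_float / 8.0f == +/-2^(e-3) before truncating to the high byte implements
// round-to-nearest: 2^(e-3) is half the spacing between representable e5m2
// values at exponent e. Worked example (illustrative): near 1.0 the e5m2 grid
// is 1.0, 1.25, 1.5, 1.75; plain truncation would map 1.45 down to 1.25, while
// 1.45 + 0.125 = 1.575 truncates to the correct nearest value 1.5.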
3339ccb7046183f560b7f3cec2f65cc8dfb89660.cu
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <cmath> #include "ATen/ATen.h" #include "ATen/cuda/CUDAContext.h" #include "ATen/cuda/detail/IndexUtils.cuh" #include "ATen/TensorUtils.h" // #include "ATen/Type.h" #include "ATen/AccumulateType.h" #include "multi_tensor_apply.cuh" #define BLOCK_SIZE 512 #define ILP 4 template<typename T> __device__ __forceinline__ bool is_aligned(T* p){ return ((uint64_t)p) % (ILP*sizeof(T)) == 0; } template<typename T> __device__ __forceinline__ void load_store(T* dst, T* src, int dst_offset, int src_offset){ typedef typename std::aligned_storage<ILP*sizeof(T), ILP*alignof(T)>::type LT; ((LT*)dst)[dst_offset] = ((LT*)src)[src_offset]; } #include "type_shim.h" typedef enum{ ADAM_MODE_0 =0, // eps under square root ADAM_MODE_1 =1 // eps outside square root } adamMode_t; template <typename T, typename GRAD_T> __global__ void adam_cuda_kernel( T* __restrict__ p, GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; for (int j = i; j < tsize; j+=totThreads) { T scaled_grad = g[j]/grad_scale; m[j] = b1*m[j] + (1-b1)*scaled_grad; v[j] = b2*v[j] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; float update = (m[j]/denom) + (decay*p[j]); p[j] = p[j] - (step_size*update); if (p_copy != NULL) p_copy[j] = (GRAD_T) p[j]; } } template <int DEPTH, typename T, typename GRAD_T> struct AdamFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* noop_gmem, TensorListMetadata<DEPTH>& tl, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, adamMode_t mode, const float decay) { int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; T* p = (T *)tl.addresses[0][tensor_loc]; p += chunk_idx*chunk_size; T* m = (T *)tl.addresses[1][tensor_loc]; m += chunk_idx*chunk_size; T* v = (T *)tl.addresses[2][tensor_loc]; v += chunk_idx*chunk_size; GRAD_T* g = (GRAD_T *)tl.addresses[3][tensor_loc]; g += chunk_idx*chunk_size; GRAD_T* p_copy = NULL; if (DEPTH == 5) { p_copy = (GRAD_T *)tl.addresses[4][tensor_loc]; p_copy += chunk_idx*chunk_size; } n -= chunk_idx*chunk_size; T incoming_p[ILP]; T incoming_m[ILP]; T incoming_v[ILP]; T incoming_g[ILP]; // to make things simple, we put aligned case in a different code path if(n % ILP == 0 && chunk_size % ILP == 0 && is_aligned(p) && is_aligned(m) && is_aligned(v) && is_aligned(g) && is_aligned(p_copy)) { for(int i_start = threadIdx.x; i_start*ILP < n && i_start*ILP < chunk_size; i_start += blockDim.x) { // load GRAD_T tmp_g[ILP]; load_store(incoming_p, p, 0, i_start); load_store(incoming_m, m, 0, i_start); load_store(incoming_v, v, 0, i_start); load_store(tmp_g, g, 0, i_start); #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_g[ii] = static_cast<T>(tmp_g[ii]); T scaled_grad = incoming_g[ii]/grad_scale; 
incoming_m[ii] = b1*incoming_m[ii] + (1-b1)*scaled_grad; incoming_v[ii] = b2*incoming_v[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(incoming_v[ii] + eps); else // Mode 1 denom = sqrtf(incoming_v[ii]) + eps; float update = (incoming_m[ii]/denom) + (decay*incoming_p[ii]); incoming_p[ii] = incoming_p[ii] - (step_size*update); if (DEPTH == 5) tmp_g[ii] = static_cast<GRAD_T>(incoming_p[ii]); } load_store(p, incoming_p, i_start, 0); load_store(m, incoming_m, i_start, 0); load_store(v, incoming_v, i_start, 0); if (DEPTH == 5) load_store(p_copy, tmp_g, i_start, 0); } } else { for(int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { incoming_p[ii] = 0; incoming_m[ii] = 0; incoming_v[ii] = 0; incoming_g[ii] = 0; int i = i_start + threadIdx.x + ii*blockDim.x; if (i < n && i < chunk_size) { incoming_p[ii] = p[i]; incoming_m[ii] = m[i]; incoming_v[ii] = v[i]; incoming_g[ii] = static_cast<T>(g[i]); } } // note for clarification to future michael: // From a pure memory dependency perspective, there's likely no point unrolling // the write loop, since writes just fire off once their LDGs arrive. // Put another way, the STGs are dependent on the LDGs, but not on each other. // There is still compute ILP benefit from unrolling the loop though. #pragma unroll for(int ii = 0; ii < ILP; ii++) { int j = i_start + threadIdx.x + ii*blockDim.x; if(j < n && j < chunk_size) { T scaled_grad = incoming_g[ii]/grad_scale; m[j] = b1*incoming_m[ii] + (1-b1)*scaled_grad; v[j] = b2*incoming_v[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(v[j] + eps); else // Mode 1 denom = sqrtf(v[j]) + eps; float update = (m[j]/denom) + (decay*incoming_p[ii]); p[j] = incoming_p[ii] - (step_size*update); if (DEPTH == 5) p_copy[j] = (GRAD_T) p[j]; } } } } } }; void fused_adam_cuda( at::Tensor & p, at::Tensor & p_copy, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { // using namespace at; //Get tensor size int tsize = p.numel(); //Determine #threads and #blocks const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - std::pow(beta1, step); const float bias_correction2 = 1 - std::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; adam_cuda_kernel<accscalar_t, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( p.DATA_PTR<accscalar_t>(), p_copy.numel() ? 
p_copy.DATA_PTR<scalar_t_0>() : NULL, m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", adam_cuda_kernel<scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( p.DATA_PTR<scalar_t_0>(), NULL, //don't output p_copy for fp32, it's wasted write
m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_CUDA_CHECK(cudaGetLastError()); }
void fused_adam_cuda_mt( int chunk_size, at::Tensor noop_flag, std::vector<std::vector<at::Tensor>> tensor_lists, // p, m, v, g, p_copy
float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { //Constants
float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - std::pow(beta1, step); const float bias_correction2 = 1 - std::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); size_t tl_sz = tensor_lists.size(); TORCH_CHECK(tl_sz == 4 || tl_sz == 5, "expected tensor lists of size 4 or 5"); if (tensor_lists[3][0].scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients
TORCH_CHECK(tensor_lists[0][0].scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type
if (tl_sz == 5) { DISPATCH_FLOAT_AND_HALF(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; multi_tensor_apply<5>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<5, accscalar_t, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } else { DISPATCH_FLOAT_AND_HALF(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<4, accscalar_t, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } } else { if (tl_sz == 5) { DISPATCH_DOUBLE_AND_FLOAT(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", multi_tensor_apply<5>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<5, scalar_t_0, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } else { DISPATCH_DOUBLE_AND_FLOAT(tensor_lists[3][0].scalar_type(), 0, "adam_cuda_mt_kernel", multi_tensor_apply<4>( BLOCK_SIZE, chunk_size, noop_flag, tensor_lists, AdamFunctor<4, scalar_t_0, scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, (adamMode_t) mode, decay); ); } } C10_CUDA_CHECK(cudaGetLastError()); }
template <typename FROM_T, typename TO_T> __device__ void convert(const FROM_T vi, TO_T& vo) { vo = static_cast<TO_T>(vi); }
template <> __device__ void convert(const float vi, uint8_t& vo) { union S { float as_float; int as_int; }; S s; s.as_float = vi; s.as_int = s.as_int & 0xFF800000; union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f); vo = t.as_byte[1]; }
template <> __device__ void convert(const uint8_t vi, float& vo) { union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_byte[0] = 0; t.as_byte[1] = vi; vo = static_cast<float>(t.as_half); } template <> __device__
void convert(const at::Half vi, uint8_t& vo) { union S { float as_float; int as_int; }; S s; s.as_float = static_cast<float>(vi); s.as_int = s.as_int & 0xFF800000; union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_half = static_cast<at::Half>(vi + s.as_float / 8.0f); vo = t.as_byte[1]; } template <> __device__ void convert(const uint8_t vi, at::Half& vo) { union T { at::Half as_half; uint8_t as_byte[2]; }; T t; t.as_byte[0] = 0; t.as_byte[1] = vi; vo = t.as_half; } template <typename GRAD_T> __global__ void strided_check_finite_cuda_kernel( volatile int* noop_gmem, GRAD_T* __restrict__ p_copy, const size_t tsize, int stride, int clear_overflow_first) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock) * stride; const int totThreads = gridDim.x*gridDim.y*threadsPerBlock*stride; if (clear_overflow_first) { if (i == 0) { *noop_gmem = 0; } __syncthreads(); } for (int j = i; j < tsize; j+=totThreads) { GRAD_T pi = p_copy[j]; if (!isfinite(pi)) { *noop_gmem = 1; } } } template <> __global__ void strided_check_finite_cuda_kernel( volatile int* noop_gmem, uint8_t* __restrict__ p_copy, const size_t tsize, int stride, int clear_overflow_first) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock) * stride; const int totThreads = gridDim.x*gridDim.y*threadsPerBlock*stride; if (clear_overflow_first) { if (i == 0) { *noop_gmem = 0; } __syncthreads(); } for (int j = i; j < tsize; j+=totThreads) { at::Half pi; convert(p_copy[j], pi); if (!isfinite(pi)) { *noop_gmem = 1; } } } template <typename FROM_T, typename TO_T> __global__ void maybe_cast_kernel( volatile int* overflow_flag, const FROM_T* p_in, TO_T* p_out, const size_t tsize) { if (overflow_flag && *overflow_flag != 0) return; //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; FROM_T pi[ILP]; TO_T po[ILP]; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) { #pragma unroll for(int ii = 0; ii < ILP; ii++) { pi[ii] = 0; int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p_in[j]; } } #pragma unroll for(int ii = 0; ii < ILP; ii++) { convert(pi[ii], po[ii]); } #pragma unroll for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { p_out[j] = po[ii]; } } } } template <typename T, typename GRAD_T, typename REDU_T> __global__ void reversible_adam_cuda_kernel( T* __restrict__ p, REDU_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { //Assuming 2D grids and 2D blocks const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + 
threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; T mi[ILP]; T vi[ILP]; T pi[ILP]; T gi[ILP]; bool overflow = false; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { mi[ii] = T(0); vi[ii] = T(0); pi[ii] = T(0); gi[ii] = GRAD_T(0); int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p[j]; mi[ii] = m[j]; vi[ii] = v[j]; gi[ii] = static_cast<T>(g[j]); } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { T scaled_grad = gi[ii]/grad_scale; if (isfinite(scaled_grad)) { mi[ii] = b1*mi[ii] + (1-b1)*scaled_grad; vi[ii] = b2*vi[ii] + (1-b2)*scaled_grad*scaled_grad; float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vi[ii] + eps); else // Mode 1
denom = sqrtf(vi[ii]) + eps; float update = (mi[ii]/denom) + (decay*pi[ii]); pi[ii] = pi[ii] - (step_size*update); } else { overflow = true; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { m[j] = mi[ii]; v[j] = vi[ii]; p[j] = pi[ii]; if (p_copy != NULL) { convert(pi[ii], p_copy[j]); } } } } if (p_copy != NULL) { __syncthreads(); if (overflow) { convert(float(INFINITY), p_copy[0]); } } }
template <typename T, typename GRAD_T> __global__ void maybe_adam_undo_cuda_kernel( volatile int* overflow_flag, T* __restrict__ p, T* __restrict__ m, T* __restrict__ v, const GRAD_T * __restrict__ g, const float b1, const float b2, const float eps, const float grad_scale, const float step_size, const size_t tsize, adamMode_t mode, const float decay) { // NB! Skip undo kernel when overflow flag is NOT set
if (overflow_flag && *overflow_flag == 0) return; //Assuming 2D grids and 2D blocks
const int blockId = gridDim.x * blockIdx.y + blockIdx.x; const int threadsPerBlock = blockDim.x * blockDim.y; const int threadIdInBlock = threadIdx.y * blockDim.x + threadIdx.x; const int i = (blockId * threadsPerBlock + threadIdInBlock); const int totThreads = gridDim.x*gridDim.y*threadsPerBlock; T mi[ILP]; T vi[ILP]; T pi[ILP]; T gi[ILP]; for(int j_start = 0; j_start < tsize; j_start+=totThreads*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { mi[ii] = T(0); vi[ii] = T(0); pi[ii] = T(0); gi[ii] = GRAD_T(0); int j = j_start + i + totThreads*ii; if (j < tsize) { pi[ii] = p[j]; mi[ii] = m[j]; vi[ii] = v[j]; gi[ii] = static_cast<T>(g[j]); } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { T scaled_grad = gi[ii]/grad_scale; if (isfinite(scaled_grad)) { float denom; if (mode == ADAM_MODE_0) denom = sqrtf(vi[ii] + eps); else // Mode 1
denom = sqrtf(vi[ii]) + eps; pi[ii] = (pi[ii] + step_size*(mi[ii]/denom)) / (1.0f - step_size*decay); mi[ii] = (mi[ii] - (1-b1)*scaled_grad) / b1; vi[ii] = (vi[ii] - (1-b2)*scaled_grad*scaled_grad) / b2; // Make sure round off errors don't create (small) negative value.
// This can happen if we have to revert the very first step.
vi[ii] = vi[ii] >= 0.0f ? vi[ii] : 0.0f; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + i + totThreads*ii; if (j < tsize) { m[j] = mi[ii]; v[j] = vi[ii]; p[j] = pi[ii]; } } } }
template <int DEPTH, typename FROM_T, typename TO_T> struct MaybeCastFunctor { __device__ __forceinline__ void operator()( int chunk_size, volatile int* overflow_flag, TensorListMetadata<DEPTH>& tl) { if (overflow_flag && *overflow_flag != 0) return; int tensor_loc = tl.block_to_tensor[blockIdx.x]; int chunk_idx = tl.block_to_chunk[blockIdx.x]; int n = tl.sizes[tensor_loc]; FROM_T* p_in = (FROM_T *)tl.addresses[0][tensor_loc]; p_in += chunk_idx*chunk_size; TO_T* p_out = (TO_T *)tl.addresses[1][tensor_loc]; p_out += chunk_idx*chunk_size; n -= chunk_idx*chunk_size; int dim = chunk_size < n ? chunk_size : n; FROM_T pi[ILP]; TO_T po[ILP]; for(int j_start = 0; j_start < dim; j_start+=blockDim.x*ILP) {
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { pi[ii] = FROM_T(0); int j = j_start + threadIdx.x + ii*blockDim.x; if (j < dim) { pi[ii] = p_in[j]; } }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { convert(pi[ii], po[ii]); }
#pragma unroll
for(int ii = 0; ii < ILP; ii++) { int j = j_start + threadIdx.x + ii*blockDim.x; if (j < dim) { p_out[j] = po[ii]; } } } } };
void fused_strided_check_finite( at::Tensor & overflow_flag, at::Tensor & p_copy, int stride, int clear_overflow_first) { //Get tensor size
int tsize = p_copy.numel(); int niter = (tsize + stride - 1) / stride; //Determine #threads and #blocks
const int threadsPerBlock = 512; //In order to avoid race condition, blocks must be 1 when clear_overflow_first flag is set.
const dim3 blocks(clear_overflow_first ? 1 : (niter+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p_copy), "parameter tensor is too large to be indexed with int32"); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); using namespace at; // prevents "toString is undefined" errors
DISPATCH_FLOAT_HALF_AND_BYTE(p_copy.scalar_type(), 0, "check_finite_cuda_kernel", strided_check_finite_cuda_kernel<scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( overflow_flag.DATA_PTR<int>(), p_copy.DATA_PTR<scalar_t_0>(), tsize, stride, clear_overflow_first); ); C10_CUDA_CHECK(cudaGetLastError()); }
void fused_reversible_adam_cuda( at::Tensor & p, at::Tensor & p_copy, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { // using namespace at;
//Get tensor size
int tsize = p.numel(); //Determine #threads and #blocks
const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants
float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - std::pow(beta1, step); const float bias_correction2 = 1 - std::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients
TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type
using namespace at; // prevents "toString is undefined" errors
if (p_copy.numel() == 0 || p_copy.scalar_type() == g.scalar_type()) { DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using
accscalar_t = at::acc_type<scalar_t_0, true>; reversible_adam_cuda_kernel<accscalar_t, scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( p.DATA_PTR<accscalar_t>(), p_copy.numel() ? p_copy.DATA_PTR<scalar_t_0>() : NULL, m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { TORCH_CHECK(p_copy.scalar_type() == at::ScalarType::Byte, "expected parameter to be of byte type"); DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_e5m2_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; reversible_adam_cuda_kernel<accscalar_t, scalar_t_0, uint8_t><<<blocks,threadsPerBlock, 0, stream>>>( p.DATA_PTR<accscalar_t>(), p_copy.DATA_PTR<uint8_t>(), m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", reversible_adam_cuda_kernel<scalar_t_0, scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( p.DATA_PTR<scalar_t_0>(), NULL, //don't output p_copy for fp32, it's wasted write
m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_CUDA_CHECK(cudaGetLastError()); }
void maybe_cast_cuda( at::Tensor & overflow_flag, at::Tensor & p_in, at::Tensor & p_out) { //Get tensor size
int tsize = p_in.numel(); TORCH_CHECK(tsize == p_out.numel(), "p_in.numel() must equal p_out.numel()"); //Determine #threads and #blocks
const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p_in), "parameter tensor is too large to be indexed with int32"); //Constants
cudaStream_t stream = at::cuda::getCurrentCUDAStream(); DISPATCH_FLOAT_HALF_AND_BYTE(p_in.scalar_type(), 0, "maybe_cast_cuda", DISPATCH_FLOAT_HALF_AND_BYTE(p_out.scalar_type(), 1, "maybe_cast_cuda", maybe_cast_kernel<scalar_t_0,scalar_t_1><<<blocks,threadsPerBlock, 0, stream>>>( overflow_flag.numel() ?
overflow_flag.DATA_PTR<int>() : NULL, p_in.DATA_PTR<scalar_t_0>(), p_out.DATA_PTR<scalar_t_1>(), tsize); )) C10_CUDA_CHECK(cudaGetLastError()); } void maybe_cast_cuda_mt( int chunk_size, at::Tensor overflow_flag, std::vector<std::vector<at::Tensor>> tensor_lists) // p_in, p_out { //Constants cudaStream_t stream = at::cuda::getCurrentCUDAStream(); size_t tl_sz = tensor_lists.size(); TORCH_CHECK(tl_sz == 2, "expected tensor lists of size 2"); DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[0][0].scalar_type(), 0, "maybe_cast_cuda_mt_kernel", DISPATCH_FLOAT_HALF_AND_BYTE(tensor_lists[1][0].scalar_type(), 1, "maybe_cast_cuda_mt_kernel", multi_tensor_apply<2>( BLOCK_SIZE, chunk_size, overflow_flag, tensor_lists, MaybeCastFunctor<2, scalar_t_0, scalar_t_1>()); )) C10_CUDA_CHECK(cudaGetLastError()); } void fused_maybe_adam_undo_cuda( at::Tensor & overflow_flag, at::Tensor & p, at::Tensor & m, at::Tensor & v, at::Tensor & g, float lr, float beta1, float beta2, float eps, float grad_scale, int step, int mode, int bias_correction, float decay) { //Get tensor size int tsize = p.numel(); //Determine #threads and #blocks const int threadsPerBlock = 512; const dim3 blocks((tsize+threadsPerBlock-1)/threadsPerBlock); TORCH_CHECK(at::cuda::detail::canUse32BitIndexMath(p), "parameter tensor is too large to be indexed with int32"); //Constants float step_size = 0; if (bias_correction == 1) { const float bias_correction1 = 1 - std::pow(beta1, step); const float bias_correction2 = 1 - std::pow(beta2, step); step_size = lr * std::sqrt(bias_correction2)/bias_correction1; } else { step_size = lr; } cudaStream_t stream = at::cuda::getCurrentCUDAStream(); if (g.scalar_type() == at::ScalarType::Half) { //all other values should be fp32 for half gradients TORCH_CHECK(p.scalar_type() == at::ScalarType::Float, "expected parameter to be of float type"); //dispatch is done on the gradient type using namespace at; // prevents "toString is undefined" errors DISPATCH_FLOAT_AND_HALF(g.scalar_type(), 0, "adam_cuda_kernel", using accscalar_t = at::acc_type<scalar_t_0, true>; maybe_adam_undo_cuda_kernel<accscalar_t, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( overflow_flag.numel() ? overflow_flag.DATA_PTR<int>() : NULL, p.DATA_PTR<accscalar_t>(), m.DATA_PTR<accscalar_t>(), v.DATA_PTR<accscalar_t>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } else { using namespace at; DISPATCH_DOUBLE_AND_FLOAT(g.scalar_type(), 0, "adam_cuda_kernel", maybe_adam_undo_cuda_kernel<scalar_t_0, scalar_t_0><<<blocks,threadsPerBlock, 0, stream>>>( overflow_flag.numel() ? overflow_flag.DATA_PTR<int>() : NULL, p.DATA_PTR<scalar_t_0>(), m.DATA_PTR<scalar_t_0>(), v.DATA_PTR<scalar_t_0>(), g.DATA_PTR<scalar_t_0>(), beta1, beta2, eps, grad_scale, step_size, tsize, (adamMode_t) mode, decay); ); } C10_CUDA_CHECK(cudaGetLastError()); }
e5096cc6944a87db8d5c7dfaa25993582d770268.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
///
/// \file multiply_kernel.cuh
/// \brief This file provides different kernel function definitions \
/// of matrix multiply. a is M*K, b is K*N. c = a*b, c is M*N.
///
/// \author Rudan Chen
/// \date 2016-01-21

__global__ void kComputeMatMultiply_v1(const float *a, const float *b, \
        float *c, const int M, const int K, const int N){
    const int idx = (blockIdx.x%M)*N + (blockIdx.x/M)*blockDim.x + threadIdx.x;
    float result = 0;
    for(int i=0; i<K; i++){
        result += a[(blockIdx.x%M)*K+i]*b[i*N+(blockIdx.x/M)*blockDim.x+threadIdx.x];
    }
    c[idx] = result;
}

__global__ void kComputeMatMultiply_v2(const float *a, const float *b, \
        float *c, const int K, const int N){
    extern __shared__ float result[];
    float local_result=0;
    for(int i=0; (i*blockDim.x+threadIdx.x)<K; i++){
        local_result += a[blockIdx.x*K+i*blockDim.x+threadIdx.x]*b[(i*blockDim.x+threadIdx.x)*N+blockIdx.y];
    }
    result[threadIdx.x] = local_result;
    __syncthreads();
    for(int activeThreads = blockDim.x/2; activeThreads; activeThreads/=2){
        if(threadIdx.x < activeThreads)
            result[threadIdx.x] += result[threadIdx.x + activeThreads];
        __syncthreads();
    }
    if(threadIdx.x == 0)
        c[blockIdx.x*N+blockIdx.y] = result[0];
    __syncthreads();
}

__global__ void kComputeMatMultiply_v3(const float *a, const float *b, \
        float *c, const int K, const int N){
    extern __shared__ float sh_a[]; ///save one row of a, shared with b
    const int idx = blockIdx.x*N + threadIdx.x;
    int i = threadIdx.x;
    while(i<K){
        sh_a[i] = a[blockIdx.x*K+i];
        i += blockDim.x;
    }
    for(int j=0; j<(N/blockDim.x); j++){
        float result = 0;
        for(int i=0; i<K; i++){
            result += sh_a[i]*b[i*N + j*blockDim.x + threadIdx.x];
        }
        c[idx + j*blockDim.x] = result;
    }
}

#define ASUB_HEIGHT 16
#define ASUB_WIDTH 32
#define BSUB_HEIGHT 32
#define BSUB_WIDTH 256
#define CSUB_HEIGHT 16
#define CSUB_WIDTH 256

/// thread number of one block is fixed at 128
/// each thread computes a 16*2 region of c
///
__global__ void kComputeMatMultiply_v4(const float *a, const float *b, \
        float *c, const int M, const int K, const int N){
    __shared__ float sh_a[ASUB_HEIGHT*ASUB_WIDTH];
    float local_c[CSUB_HEIGHT][2];
    const int c_block_row = blockIdx.x / (N/CSUB_WIDTH);
    const int c_block_col = blockIdx.x % (N/CSUB_WIDTH);
    const int v1 = c_block_row*CSUB_HEIGHT; ///v1 is a temporary variable, as are v2...
    const int v2 = c_block_col*CSUB_WIDTH;
    const int v3 = threadIdx.x*2;

    //copy c to local variable
    for(int i=0; i<CSUB_HEIGHT; i++){
        local_c[i][0] = c[(v1+i)*N + v2 + v3];
        local_c[i][1] = c[(v1+i)*N + v2 + v3 + 1];
    }
    for(int i=0; i<(K/ASUB_WIDTH); i++){
        const int v4 = i*ASUB_WIDTH;
        const int v5 = i*BSUB_HEIGHT;
        for(int j=0; j<4; j++){
            int row_id = (threadIdx.x + j*blockDim.x)/ASUB_WIDTH;
            int col_id = (threadIdx.x + j*blockDim.x)%ASUB_WIDTH;
            sh_a[threadIdx.x + j*blockDim.x] = a[(v1+row_id)*K + v4 + col_id];
        }
        __syncthreads();
        for(int k=0; k<BSUB_HEIGHT; k++){
            for(int m=0; m<CSUB_HEIGHT; m++){
                local_c[m][0] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N \
                        + v2 + v3];
                local_c[m][1] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N \
                        + v2 + v3 + 1];
            }
        }
        __syncthreads();
    }
    for(int i=0; i<CSUB_HEIGHT; i++){
        c[(v1+i)*N + v2 + v3] = local_c[i][0];
        c[(v1+i)*N + v2 + v3 + 1] = local_c[i][1];
    }
}
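// A minimal host-side launch sketch for kComputeMatMultiply_v4 (illustrative
// only; the d_a/d_b/d_c pointer names are assumptions, not part of this file).
// It assumes M % CSUB_HEIGHT == 0, N % CSUB_WIDTH == 0 and K % ASUB_WIDTH == 0,
// and that c is initialized before the launch, since the kernel first loads c
// and accumulates into it (zero it for a plain product). The block size of 128
// threads is fixed by the kernel's design; the grid has one block per
// CSUB_HEIGHT x CSUB_WIDTH tile of c:
//
//   const int threads = 128;
//   const int blocks = (M / CSUB_HEIGHT) * (N / CSUB_WIDTH);
//   hipMemset(d_c, 0, M * N * sizeof(float));
//   hipLaunchKernelGGL(kComputeMatMultiply_v4, dim3(blocks), dim3(threads),
//                      0, 0, d_a, d_b, d_c, M, K, N);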
e5096cc6944a87db8d5c7dfaa25993582d770268.cu
///
/// \file multiply_kernel.cuh
/// \brief This file provides different kernel function definitions \
/// of matrix multiply. a is M*K, b is K*N. c = a*b, c is M*N.
///
/// \author Rudan Chen
/// \date 2016-01-21

__global__ void kComputeMatMultiply_v1(const float *a, const float *b, \
        float *c, const int M, const int K, const int N){
    const int idx = (blockIdx.x%M)*N + (blockIdx.x/M)*blockDim.x + threadIdx.x;
    float result = 0;
    for(int i=0; i<K; i++){
        result += a[(blockIdx.x%M)*K+i]*b[i*N+(blockIdx.x/M)*blockDim.x+threadIdx.x];
    }
    c[idx] = result;
}

__global__ void kComputeMatMultiply_v2(const float *a, const float *b, \
        float *c, const int K, const int N){
    extern __shared__ float result[];
    float local_result=0;
    for(int i=0; (i*blockDim.x+threadIdx.x)<K; i++){
        local_result += a[blockIdx.x*K+i*blockDim.x+threadIdx.x]*b[(i*blockDim.x+threadIdx.x)*N+blockIdx.y];
    }
    result[threadIdx.x] = local_result;
    __syncthreads();
    for(int activeThreads = blockDim.x/2; activeThreads; activeThreads/=2){
        if(threadIdx.x < activeThreads)
            result[threadIdx.x] += result[threadIdx.x + activeThreads];
        __syncthreads();
    }
    if(threadIdx.x == 0)
        c[blockIdx.x*N+blockIdx.y] = result[0];
    __syncthreads();
}

__global__ void kComputeMatMultiply_v3(const float *a, const float *b, \
        float *c, const int K, const int N){
    extern __shared__ float sh_a[]; ///save one row of a, shared with b
    const int idx = blockIdx.x*N + threadIdx.x;
    int i = threadIdx.x;
    while(i<K){
        sh_a[i] = a[blockIdx.x*K+i];
        i += blockDim.x;
    }
    for(int j=0; j<(N/blockDim.x); j++){
        float result = 0;
        for(int i=0; i<K; i++){
            result += sh_a[i]*b[i*N + j*blockDim.x + threadIdx.x];
        }
        c[idx + j*blockDim.x] = result;
    }
}

#define ASUB_HEIGHT 16
#define ASUB_WIDTH 32
#define BSUB_HEIGHT 32
#define BSUB_WIDTH 256
#define CSUB_HEIGHT 16
#define CSUB_WIDTH 256

/// thread number of one block is fixed at 128
/// each thread computes a 16*2 region of c
///
__global__ void kComputeMatMultiply_v4(const float *a, const float *b, \
        float *c, const int M, const int K, const int N){
    __shared__ float sh_a[ASUB_HEIGHT*ASUB_WIDTH];
    float local_c[CSUB_HEIGHT][2];
    const int c_block_row = blockIdx.x / (N/CSUB_WIDTH);
    const int c_block_col = blockIdx.x % (N/CSUB_WIDTH);
    const int v1 = c_block_row*CSUB_HEIGHT; ///v1 is a temporary variable, as are v2...
    const int v2 = c_block_col*CSUB_WIDTH;
    const int v3 = threadIdx.x*2;

    //copy c to local variable
    for(int i=0; i<CSUB_HEIGHT; i++){
        local_c[i][0] = c[(v1+i)*N + v2 + v3];
        local_c[i][1] = c[(v1+i)*N + v2 + v3 + 1];
    }
    for(int i=0; i<(K/ASUB_WIDTH); i++){
        const int v4 = i*ASUB_WIDTH;
        const int v5 = i*BSUB_HEIGHT;
        for(int j=0; j<4; j++){
            int row_id = (threadIdx.x + j*blockDim.x)/ASUB_WIDTH;
            int col_id = (threadIdx.x + j*blockDim.x)%ASUB_WIDTH;
            sh_a[threadIdx.x + j*blockDim.x] = a[(v1+row_id)*K + v4 + col_id];
        }
        __syncthreads();
        for(int k=0; k<BSUB_HEIGHT; k++){
            for(int m=0; m<CSUB_HEIGHT; m++){
                local_c[m][0] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N \
                        + v2 + v3];
                local_c[m][1] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N \
                        + v2 + v3 + 1];
            }
        }
        __syncthreads();
    }
    for(int i=0; i<CSUB_HEIGHT; i++){
        c[(v1+i)*N + v2 + v3] = local_c[i][0];
        c[(v1+i)*N + v2 + v3 + 1] = local_c[i][1];
    }
}
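// A minimal host-side launch sketch for kComputeMatMultiply_v4 (illustrative
// only; the d_a/d_b/d_c pointer names are assumptions, not part of this file).
// It assumes M % CSUB_HEIGHT == 0, N % CSUB_WIDTH == 0 and K % ASUB_WIDTH == 0,
// and that c is initialized before the launch, since the kernel first loads c
// and accumulates into it (zero it for a plain product). The block size of 128
// threads is fixed by the kernel's design; the grid has one block per
// CSUB_HEIGHT x CSUB_WIDTH tile of c:
//
//   const int threads = 128;
//   const int blocks = (M / CSUB_HEIGHT) * (N / CSUB_WIDTH);
//   cudaMemset(d_c, 0, M * N * sizeof(float));
//   kComputeMatMultiply_v4<<<blocks, threads>>>(d_a, d_b, d_c, M, K, N);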
75ec651d34d9e60541901b26c293609088142769.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<cstdio>

#define imin(a,b) (a<b?a:b)

//const int N = 33 * 1024;
const int N = 8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    hipEvent_t start, stop;
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    ( hipEventCreate( &start ) );
    ( hipEventCreate( &stop ) );
    ( hipEventRecord( start, 0 ) );

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    (hipMalloc( (void**)&dev_a, N*sizeof(float) ) );
    ( hipMalloc( (void**)&dev_b, N*sizeof(float) ) );
    ( hipMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) ) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    ( hipMemcpy( dev_a, a, N*sizeof(float), hipMemcpyHostToDevice ) );
    ( hipMemcpy( dev_b, b, N*sizeof(float), hipMemcpyHostToDevice ) );

    hipLaunchKernelGGL(( dot), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    ( hipMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), hipMemcpyDeviceToHost ) );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf( "Dot product %f\n", c); // 2 * sum_squares( (float)(N - 1) ) );

    // get stop time, and display the timing results
    ( hipEventRecord( stop, 0 ) );
    ( hipEventSynchronize( stop ) );
    float elapsedTime;
    ( hipEventElapsedTime( &elapsedTime, start, stop ) );
    printf( "Time to generate: %3.1f ms\n", elapsedTime );
    ( hipEventDestroy( start ) );
    ( hipEventDestroy( stop ) );

    // free memory on the gpu side
    ( hipFree( dev_a ) );
    ( hipFree( dev_b ) );
    ( hipFree( dev_partial_c ) );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}
75ec651d34d9e60541901b26c293609088142769.cu
#include<cstdio>

#define imin(a,b) (a<b?a:b)

//const int N = 33 * 1024;
const int N = 8;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin( 32, (N+threadsPerBlock-1) / threadsPerBlock );

__global__ void dot( float *a, float *b, float *c ) {
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;

    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cacheIndex] = temp;

    // synchronize threads in this block
    __syncthreads();

    // for reductions, threadsPerBlock must be a power of 2
    // because of the following code
    int i = blockDim.x/2;
    while (i != 0) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
        i /= 2;
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

int main( void ) {
    cudaEvent_t start, stop;
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // note: error checking (e.g., a HANDLE_ERROR-style macro) is omitted on these calls
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );

    // allocate memory on the cpu side
    a = (float*)malloc( N*sizeof(float) );
    b = (float*)malloc( N*sizeof(float) );
    partial_c = (float*)malloc( blocksPerGrid*sizeof(float) );

    // allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N*sizeof(float) );
    cudaMalloc( (void**)&dev_b, N*sizeof(float) );
    cudaMalloc( (void**)&dev_partial_c, blocksPerGrid*sizeof(float) );

    // fill in the host memory with data
    for (int i=0; i<N; i++) {
        a[i] = i;
        b[i] = i*2;
    }

    // copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice );

    dot<<<blocksPerGrid,threadsPerBlock>>>( dev_a, dev_b, dev_partial_c );

    // copy the array 'c' back from the GPU to the CPU
    cudaMemcpy( partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost );

    // finish up on the CPU side
    c = 0;
    for (int i=0; i<blocksPerGrid; i++) {
        c += partial_c[i];
    }

    #define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    printf( "Dot product: %f\n", c );  // expected value: 2 * sum_squares( (float)(N - 1) )

    // get stop time, and display the timing results
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "Time to generate: %3.1f ms\n", elapsedTime );

    cudaEventDestroy( start );
    cudaEventDestroy( stop );

    // free memory on the gpu side
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_partial_c );

    // free memory on the cpu side
    free( a );
    free( b );
    free( partial_c );
}
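Because the inputs are deterministic (a[i] = i, b[i] = 2*i), the result is easy to cross-check on the host. A hedged fragment (variable name c_ref is illustrative) intended to sit in main() right after the partial sums are combined; it uses the sum_squares macro already defined there:

// CPU reference: dot = 2 * (0^2 + 1^2 + ... + (N-1)^2), which sum_squares gives in closed form.
float c_ref = 0.0f;
for (int i = 0; i < N; i++)
    c_ref += (float)i * (float)(2 * i);
printf( "reference %f, closed form %f\n", c_ref, 2 * sum_squares( (float)(N - 1) ) );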
248cf3ebfb4099e7ef432140607ff2adace55c01.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/filling.hpp> #include <cudf/types.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/repeat.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/thrust_rmm_allocator.h> #include <cudf/detail/nvtx/ranges.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/binary_search.h> #include <hip/hip_runtime.h> #include <limits> #include <memory> namespace { struct count_accessor { cudf::scalar const* p_scalar = nullptr; template <typename T> std::enable_if_t<std::is_integral<T>::value, cudf::size_type> operator()(hipStream_t stream = 0) { using ScalarType = cudf::experimental::scalar_type_t<T>; #if 1 // TODO: temporary till cudf::scalar's value() function is marked as const auto p_count = const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar)); #else auto p_count = static_cast<ScalarType const*>(this->p_scalar); #endif auto count = p_count->value(); // static_cast is necessary due to bool8 CUDF_EXPECTS(static_cast<int64_t>(count) <= std::numeric_limits<cudf::size_type>::max(), "count should not exceed size_type's limit."); return static_cast<cudf::size_type>(count); } template <typename T> std::enable_if_t<not std::is_integral<T>::value, cudf::size_type> operator()(hipStream_t stream) { CUDF_FAIL("count value should be a integral type."); } }; struct compute_offsets { cudf::column_view const* p_column = nullptr; template <typename T> std::enable_if_t<std::is_integral<T>::value, rmm::device_vector<cudf::size_type>> operator()(bool check_count, hipStream_t stream = 0) { // static_cast is necessary due to bool8 if (check_count && static_cast<int64_t>(std::numeric_limits<T>::max()) > std::numeric_limits<cudf::size_type>::max()) { auto max = thrust::reduce(p_column->begin<T>(), p_column->end<T>(), 0, thrust::maximum<T>()); CUDF_EXPECTS(max <= std::numeric_limits<cudf::size_type>::max(), "count should not have values larger than size_type's limit." 
); } rmm::device_vector<cudf::size_type> offsets(p_column->size()); thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream), p_column->begin<T>(), p_column->end<T>(), offsets.begin()); if (check_count == true) { CUDF_EXPECTS(thrust::is_sorted(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end()) == true, "count has negative values or the resulting table has more \ rows than size_type's limit."); } return offsets; } template <typename T> std::enable_if_t<not std::is_integral<T>::value, rmm::device_vector<cudf::size_type>> operator()(bool check_count, hipStream_t stream) { CUDF_FAIL("count value should be a integral type."); } }; } namespace cudf { namespace experimental { namespace detail { std::unique_ptr<table> repeat(table_view const& input_table, column_view const& count, bool check_count, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(input_table.num_rows() == count.size(), "in and count must have equal size"); CUDF_EXPECTS(count.has_nulls() == false, "count cannot contain nulls"); if (input_table.num_rows() == 0) { return cudf::experimental::empty_like(input_table); } auto offsets = cudf::experimental::type_dispatcher(count.type(), compute_offsets{&count}, check_count, stream); size_type output_size{offsets.back()}; rmm::device_vector<size_type> indices(output_size); thrust::upper_bound(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(output_size), indices.begin()); return gather(input_table, indices.begin(), indices.end(), false, mr, stream); } std::unique_ptr<table> repeat(table_view const& input_table, scalar const& count, rmm::mr::device_memory_resource* mr, hipStream_t stream) { CUDF_EXPECTS(count.is_valid(), "count cannot be null"); auto stride = cudf::experimental::type_dispatcher( count.type(), count_accessor{&count}, stream); CUDF_EXPECTS(stride >= 0, "count value should be non-negative"); CUDF_EXPECTS(static_cast<int64_t>(input_table.num_rows()) * stride <= std::numeric_limits<size_type>::max(), "The resulting table has more rows than size_type's limit."); if ((input_table.num_rows() == 0) || (stride == 0)) { return cudf::experimental::empty_like(input_table); } auto output_size = input_table.num_rows() * stride; auto map_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [stride] __device__ (auto i) { return i / stride; }); auto map_end = map_begin + output_size; return gather(input_table, map_begin, map_end, false, mr, stream); } } // namespace detail std::unique_ptr<table> repeat(table_view const& input_table, column_view const& count, bool check_count, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat(input_table, count, check_count, mr, 0); } std::unique_ptr<table> repeat(table_view const& input_table, scalar const& count, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat(input_table, count, mr, 0); } } // namespace experimental } // namespace cudf
248cf3ebfb4099e7ef432140607ff2adace55c01.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/copying.hpp> #include <cudf/filling.hpp> #include <cudf/types.hpp> #include <cudf/column/column.hpp> #include <cudf/column/column_view.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/gather.cuh> #include <cudf/detail/repeat.hpp> #include <cudf/scalar/scalar.hpp> #include <cudf/table/table.hpp> #include <cudf/table/table_view.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <rmm/thrust_rmm_allocator.h> #include <cudf/detail/nvtx/ranges.hpp> #include <thrust/iterator/counting_iterator.h> #include <thrust/iterator/constant_iterator.h> #include <thrust/iterator/transform_output_iterator.h> #include <thrust/scan.h> #include <thrust/binary_search.h> #include <cuda_runtime.h> #include <limits> #include <memory> namespace { struct count_accessor { cudf::scalar const* p_scalar = nullptr; template <typename T> std::enable_if_t<std::is_integral<T>::value, cudf::size_type> operator()(cudaStream_t stream = 0) { using ScalarType = cudf::experimental::scalar_type_t<T>; #if 1 // TODO: temporary till cudf::scalar's value() function is marked as const auto p_count = const_cast<ScalarType*>(static_cast<ScalarType const*>(this->p_scalar)); #else auto p_count = static_cast<ScalarType const*>(this->p_scalar); #endif auto count = p_count->value(); // static_cast is necessary due to bool8 CUDF_EXPECTS(static_cast<int64_t>(count) <= std::numeric_limits<cudf::size_type>::max(), "count should not exceed size_type's limit."); return static_cast<cudf::size_type>(count); } template <typename T> std::enable_if_t<not std::is_integral<T>::value, cudf::size_type> operator()(cudaStream_t stream) { CUDF_FAIL("count value should be a integral type."); } }; struct compute_offsets { cudf::column_view const* p_column = nullptr; template <typename T> std::enable_if_t<std::is_integral<T>::value, rmm::device_vector<cudf::size_type>> operator()(bool check_count, cudaStream_t stream = 0) { // static_cast is necessary due to bool8 if (check_count && static_cast<int64_t>(std::numeric_limits<T>::max()) > std::numeric_limits<cudf::size_type>::max()) { auto max = thrust::reduce(p_column->begin<T>(), p_column->end<T>(), 0, thrust::maximum<T>()); CUDF_EXPECTS(max <= std::numeric_limits<cudf::size_type>::max(), "count should not have values larger than size_type's limit." 
); } rmm::device_vector<cudf::size_type> offsets(p_column->size()); thrust::inclusive_scan(rmm::exec_policy(stream)->on(stream), p_column->begin<T>(), p_column->end<T>(), offsets.begin()); if (check_count == true) { CUDF_EXPECTS(thrust::is_sorted(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end()) == true, "count has negative values or the resulting table has more \ rows than size_type's limit."); } return offsets; } template <typename T> std::enable_if_t<not std::is_integral<T>::value, rmm::device_vector<cudf::size_type>> operator()(bool check_count, cudaStream_t stream) { CUDF_FAIL("count value should be a integral type."); } }; } namespace cudf { namespace experimental { namespace detail { std::unique_ptr<table> repeat(table_view const& input_table, column_view const& count, bool check_count, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(input_table.num_rows() == count.size(), "in and count must have equal size"); CUDF_EXPECTS(count.has_nulls() == false, "count cannot contain nulls"); if (input_table.num_rows() == 0) { return cudf::experimental::empty_like(input_table); } auto offsets = cudf::experimental::type_dispatcher(count.type(), compute_offsets{&count}, check_count, stream); size_type output_size{offsets.back()}; rmm::device_vector<size_type> indices(output_size); thrust::upper_bound(rmm::exec_policy(stream)->on(stream), offsets.begin(), offsets.end(), thrust::make_counting_iterator(0), thrust::make_counting_iterator(output_size), indices.begin()); return gather(input_table, indices.begin(), indices.end(), false, mr, stream); } std::unique_ptr<table> repeat(table_view const& input_table, scalar const& count, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { CUDF_EXPECTS(count.is_valid(), "count cannot be null"); auto stride = cudf::experimental::type_dispatcher( count.type(), count_accessor{&count}, stream); CUDF_EXPECTS(stride >= 0, "count value should be non-negative"); CUDF_EXPECTS(static_cast<int64_t>(input_table.num_rows()) * stride <= std::numeric_limits<size_type>::max(), "The resulting table has more rows than size_type's limit."); if ((input_table.num_rows() == 0) || (stride == 0)) { return cudf::experimental::empty_like(input_table); } auto output_size = input_table.num_rows() * stride; auto map_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [stride] __device__ (auto i) { return i / stride; }); auto map_end = map_begin + output_size; return gather(input_table, map_begin, map_end, false, mr, stream); } } // namespace detail std::unique_ptr<table> repeat(table_view const& input_table, column_view const& count, bool check_count, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat(input_table, count, check_count, mr, 0); } std::unique_ptr<table> repeat(table_view const& input_table, scalar const& count, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::repeat(input_table, count, mr, 0); } } // namespace experimental } // namespace cudf
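For orientation, a hedged sketch of how the public entry points above might be invoked. Only the repeat() signatures come from this file; the scalar type, the input table `input`, and the default-memory-resource call are assumptions about the cudf/rmm versions this code was written against.

// Hypothetical caller: repeat every row of `input` three times.
cudf::numeric_scalar<cudf::size_type> count(3);
auto repeated = cudf::experimental::repeat(input.view(), count,
                                           rmm::mr::get_default_resource());
// `repeated` is a std::unique_ptr<table> holding three times the rows of `input`.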
ec8ec8e860c51ff568788bac14658416b5dd9862.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/hip/Atomic.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/native/hip/SortingCommon.cuh> #include <ATen/AccumulateType.h> #include <THH/THHThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::hip::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partials_per_segment), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); C10_HIP_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { hipLaunchKernelGGL(( krn_partial_segment_offset), dim3(ceil_div(num_of_segments, 32)), dim3(32), 0, stream, partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); C10_HIP_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = ::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and handle bags if (offset2bag.defined()) { hipLaunchKernelGGL(( compute_grad_weight_bags<scalar_t>), 
dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); } else { hipLaunchKernelGGL(( compute_grad_weight<scalar_t>), dim3(grid), dim3(block), 0, stream, orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); hipLaunchKernelGGL(( sum_and_scatter<scalar_t>), dim3(grid2), dim3(block), 0, stream, sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); C10_HIP_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } }}
ec8ec8e860c51ff568788bac14658416b5dd9862.cu
#include <ATen/ATen.h> #include <ATen/cuda/Atomic.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/TensorUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/native/cuda/SortingCommon.cuh> #include <ATen/AccumulateType.h> #include <THC/THCThrustAllocator.cuh> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/unique.h> #include <c10/macros/Macros.h> namespace at { namespace native { namespace { /* This code computes the sum of the weights in two-steps: 1) Each GPU warp sums `NROWS_PER_THREAD` number of row given by `indeces` 2) Each partial-sum from 1) are summed and scatter into `grad_weight` Notice, `NROWS_PER_THREAD` impacts the Achieved Occupancy of the kernel execution. If it is high, the size of the thread blocks will be too small to achieve good occupancy. Similarly, a very low value will make the size of the thread blocks in the final sum in step 2) too small. */ constexpr int NROWS_PER_THREAD = 10; // Fast ceil division (no overflow checking) __host__ __device__ __forceinline__ int64_t ceil_div(int64_t x, int64_t y) { return (x + y - 1) / y; } template <typename index_t> __global__ void krn_partials_per_segment(index_t *ret, const index_t *segment_offsets, int64_t num_of_segments, int64_t numel) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { const int64_t idx_start = segment_offsets[id]; const int64_t idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; const int64_t size = idx_end - idx_start; ret[id] = ceil_div(size, NROWS_PER_THREAD); } } template <typename index_t> __global__ void krn_partial_segment_offset( index_t *ret, const index_t *partials_per_segment, const index_t *partials_per_segment_offset, const index_t *segment_offsets, int64_t num_of_segments) { const int id = blockIdx.x * blockDim.x + threadIdx.x; if(id < num_of_segments) { index_t idx = partials_per_segment_offset[id]; const index_t num_partials = partials_per_segment[id]; const index_t segment_offset = segment_offsets[id]; for (int64_t i=0; i<num_partials; ++i) { ret[idx++] = segment_offset + i * NROWS_PER_THREAD; } } } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight_bags( index_t *indices, scalar_t *gradOutput, index_t *offset2bag, index_t *count, ptrdiff_t numel, int64_t stride, int mode_mean, const index_t *bag_size, scalar_t* per_sample_weights, int64_t per_sample_weights_stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const int origRow = indices[idx]; const int seq_number = offset2bag[origRow]; const int gradOutputRow = seq_number * stride; acc_type<scalar_t, true> scale = count ? 
1.0 / count[idx] : 1.0; if (per_sample_weights) { scale *= per_sample_weights[origRow * per_sample_weights_stride]; } acc_type<scalar_t, true> gradient = gradOutput[gradOutputRow + startFeature]; if (mode_mean) { gradient /= bag_size[seq_number]; } weight += gradient * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } template <typename scalar_t, typename index_t> __global__ void compute_grad_weight( index_t *indices, scalar_t *gradOutput, index_t *count, ptrdiff_t numel, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, acc_type<scalar_t, true> *grad_weight_per_segment, const int64_t stride_warped) { using accscalar_t = acc_type<scalar_t, true>; const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_offsets[id]; const int idx_end = (id == num_of_segments-1)?numel:segment_offsets[id+1]; accscalar_t weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { const index_t target_row = indices[idx]; const accscalar_t scale = count ? (accscalar_t)1.0 / count[idx] : 1.0; weight += gradOutput[target_row * stride + startFeature] * scale; } grad_weight_per_segment[id * stride + startFeature] = weight; } // This kernel assumes that all input tensors are contiguous. template <typename scalar_t, typename index_t> __global__ void sum_and_scatter( index_t *input, scalar_t *gradWeight, int64_t stride, index_t* segment_offsets, int64_t num_of_segments, const acc_type<scalar_t, true> *grad_weight_per_segment, const index_t *segment_sizes_offsets, int64_t num_of_partial_segments, const int64_t padding_idx, const int64_t stride_warped) { const int gid = blockIdx.x * blockDim.x + threadIdx.x; const int id = gid / stride_warped; const int startFeature = gid % stride_warped; if (startFeature >= stride) { return; } if (id >= num_of_segments) { return; } const int idx_begin = segment_sizes_offsets[id]; const int idx_end = (id == num_of_segments-1)?num_of_partial_segments:segment_sizes_offsets[id+1]; acc_type<scalar_t, true> weight = 0; for (int idx=idx_begin; idx < idx_end; ++idx) { weight += grad_weight_per_segment[idx*stride + startFeature]; } int64_t target_row = input[segment_offsets[id]]; if (target_row != padding_idx) { gradWeight[target_row * stride + startFeature] = weight; } } } // anon namespace Tensor embedding_backward_cuda_kernel( const Tensor &grad, const Tensor &orig_indices, const Tensor &sorted_indices, const Tensor &count, int64_t num_weights, int padding_idx, bool mode_mean, const Tensor &offset2bag, const Tensor &bag_size, const Tensor &per_sample_weights) { auto stream = at::cuda::getCurrentCUDAStream(); auto allocator = THCThrustAllocator(globalContext().lazyInitCUDA()); auto policy = thrust::cuda::par(allocator).on(stream); const ptrdiff_t numel = sorted_indices.numel(); auto grad_weight = at::zeros({num_weights, grad.size(-1)}, grad.options()); const int64_t stride = grad_weight.stride(0); // Compute the number of segments and their start position so that we do not have to // spawn a warp per index. In this context, a segment is a number of rows that should // be summarized. 
// Unit: index in `sorted_indices` and `orig_indices` AT_DISPATCH_INDEX_TYPES(orig_indices.scalar_type(), "embedding_backward_cuda_kernel", [&] () { auto segment_offsets = at::empty({numel}, orig_indices.options()); int64_t num_of_segments; { auto sorted_indices_dev = thrust::device_ptr<index_t>(sorted_indices.data_ptr<index_t>()); auto dummy = at::empty_like(sorted_indices, LEGACY_CONTIGUOUS_MEMORY_FORMAT); auto dummy_dev = thrust::device_ptr<index_t>(dummy.data_ptr<index_t>()); auto ends = thrust::unique_by_key_copy( policy, sorted_indices_dev, sorted_indices_dev + numel, thrust::make_counting_iterator(0), dummy_dev, thrust::device_ptr<index_t>(segment_offsets.data_ptr<index_t>())); num_of_segments = thrust::get<0>(ends) - dummy_dev; } // We split the segments up into sizes of `NROWS_PER_THREAD` // Compute the number partial-segments per segment (some partial-segments // may not be the full `NROWS_PER_THREAD` number of rows) auto partials_per_segment = at::empty({num_of_segments}, orig_indices.options()); { krn_partials_per_segment<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partials_per_segment.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments, numel); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // In order to compute `partial_segment_offset`, which is the start index // of each partial-segment in `sorted_indices`, we need to compute the // start position of each _segment_ in `partial_segment_offset`. // Unit: index in `partial_segment_offset` auto partials_per_segment_offset = at::empty({num_of_segments}, orig_indices.options()); thrust::exclusive_scan( policy, thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()), thrust::device_ptr<index_t>(partials_per_segment.data_ptr<index_t>()+num_of_segments), thrust::device_ptr<index_t>(partials_per_segment_offset.data_ptr<index_t>())); // The total number of partial-segments is the sum of `partials_per_segment_offset` const int num_of_partial_segments = partials_per_segment[num_of_segments-1].item<index_t>() + partials_per_segment_offset[num_of_segments-1].item<index_t>(); // Now we can compute the start position of each partial-segment // Unit: index in `sorted_indices` and `orig_indices` auto partial_segment_offset = at::empty({num_of_partial_segments}, orig_indices.options()); { krn_partial_segment_offset<<<ceil_div(num_of_segments, 32), 32, 0, stream>>> ( partial_segment_offset.data_ptr<index_t>(), partials_per_segment.data_ptr<index_t>(), partials_per_segment_offset.data_ptr<index_t>(), segment_offsets.data_ptr<index_t>(), num_of_segments); C10_CUDA_KERNEL_LAUNCH_CHECK(); } const int stride_warped = ceil_div(stride, C10_WARP_SIZE)*C10_WARP_SIZE; const int block = std::min(stride_warped, MAX_BLOCK_SIZE); const int grid = ceil_div(num_of_partial_segments*stride_warped, block); AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, grad.scalar_type(), "embedding_bag_backward_cuda_compute_grad_weight", [&] { // For numerical stability, the dtype of `grad_weight_per_segment` // should match `acc_type` using partial_weight_t = acc_type<scalar_t, true>; TensorOptions op; if(grad.dtype() == at::kHalf || grad.dtype() == at::kBFloat16) { op = grad.options().dtype(at::kFloat); } else { op = grad.options(); } auto grad_weight_per_segment = at::empty({num_of_partial_segments, stride}, op); // Compute the sum of each partial-segment and handle bags if (offset2bag.defined()) { compute_grad_weight_bags<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), 
grad.data_ptr<scalar_t>(), offset2bag.data_ptr<index_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, mode_mean, bag_size.data_ptr<index_t>(), per_sample_weights.defined() ? per_sample_weights.data_ptr<scalar_t>() : NULL, per_sample_weights.defined() ? per_sample_weights.stride(0) : 0, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); } else { compute_grad_weight<scalar_t><<<grid, block, 0, stream>>>( orig_indices.data_ptr<index_t>(), grad.data_ptr<scalar_t>(), count.defined() ? count.data_ptr<index_t>() : nullptr, numel, stride, partial_segment_offset.data_ptr<index_t>(), num_of_partial_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); } // Finally, we sum all the partial-sums and scatter them // into `grad_weight`. const int grid2 = ceil_div(num_of_segments*stride_warped, block); sum_and_scatter<scalar_t><<<grid2, block, 0, stream>>>( sorted_indices.data_ptr<index_t>(), grad_weight.data_ptr<scalar_t>(), stride, segment_offsets.data_ptr<index_t>(), num_of_segments, grad_weight_per_segment.data_ptr<partial_weight_t>(), partials_per_segment_offset.data_ptr<index_t>(), num_of_partial_segments, padding_idx, stride_warped); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); return grad_weight; } }}
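The segment/partial-segment bookkeeping above is easier to follow in scalar form. Below is a CPU illustration (plain C++, not PyTorch code; segment_offsets, numel, and nrows_per_thread are assumed inputs with the same meaning as in the kernels):

#include <cstdint>
#include <vector>

// Mirror of krn_partials_per_segment: split each segment
// [segment_offsets[id], next offset) into chunks of at most nrows_per_thread rows.
std::vector<int64_t> partialCounts(const std::vector<int64_t>& segment_offsets,
                                   int64_t numel, int64_t nrows_per_thread) {
    std::vector<int64_t> partials(segment_offsets.size());
    for (size_t id = 0; id < segment_offsets.size(); ++id) {
        int64_t end = (id + 1 == segment_offsets.size()) ? numel : segment_offsets[id + 1];
        partials[id] = (end - segment_offsets[id] + nrows_per_thread - 1) / nrows_per_thread;
    }
    // An exclusive scan of `partials` then gives each segment's first partial-segment
    // index, which is what the thrust::exclusive_scan above computes on the GPU.
    return partials;
}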
b19c8da24435c0ec806133bb89ffacac7dff8c9e.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "streamCompact.h"
#include "scatter.h"

int * ThrustStreamCompact(int * input, int size, int value){
	int * output = new int[size];
	//thrust::copy_if(input, input + size, output, is_not_zero());
	return output;
}

int * StreamCompact(int * input, int * bool_input, int size, int cnt){
	int * Md;
	int * Md2;
	int * Pd;
	int bsize = size * sizeof(int);
	int * output = new int[cnt];
	//int * temp;

	hipMalloc(&Md, bsize);
	hipMalloc(&Md2, bsize);
	hipMalloc(&Pd, bsize);  // the kernel writes at the original index, so Pd must hold `size` elements
	//hipMalloc(&temp, bsize);
	hipMemcpy(Md, input, bsize, hipMemcpyHostToDevice);
	hipMemcpy(Md2, bool_input, bsize, hipMemcpyHostToDevice);
	int numBlocks = (int)ceil(size/(float)block_size);

	//GPU version scatter
	hipLaunchKernelGGL(( GPUStreamCompact), dim3(numBlocks), dim3(block_size), 0, 0, Md, Md2, Pd, size);
	hipMemcpy(output, Pd, cnt*sizeof(int), hipMemcpyDeviceToHost);  // output holds only cnt elements

	hipFree(Md);
	hipFree(Md2);
	hipFree(Pd);
	return output;
}

// Keeps each selected value at its original index; a dense compaction would
// additionally need a prefix scan of the flags to compute packed positions.
__global__ void GPUStreamCompact(int *in, int * in2, int *out, int n){
	int thid = blockDim.x * blockIdx.x + threadIdx.x;
	if(thid<n){
		if(in2[thid]==1)
			out[thid] = in[thid];
	}
}
b19c8da24435c0ec806133bb89ffacac7dff8c9e.cu
#include "streamCompact.h" #include "scatter.h" int * ThrustStreamCompact(int * input, int size, int value){ int * output = new int[size]; //thrust::copy_if(input, input[size], output, is_not_zero()); return output; } int * StreamCompact(int * input, int * bool_input, int size, int cnt){ int * Md; int * Md2; int * Pd; int bsize = size * sizeof(int); int * output = new int[cnt]; //int * temp; cudaMalloc(&Md, bsize); cudaMalloc(&Md2, bsize); cudaMalloc(&Pd, cnt*sizeof(int)); //cudaMalloc(&temp, bsize); cudaMemcpy(Md, input, bsize, cudaMemcpyHostToDevice); cudaMemcpy(Md2, bool_input, bsize, cudaMemcpyHostToDevice); int numBlocks = (int)ceil(size/(float)block_size); //GPU version scatter GPUStreamCompact<<<numBlocks, block_size>>> (Md, Md2, Pd, size); cudaMemcpy(output, Pd, bsize, cudaMemcpyDeviceToHost); cudaFree(Md); cudaFree(Md2); cudaFree(Pd); return output; } __global__ void GPUStreamCompact(int *in, int * in2, int *out, int n){ int thid = blockDim.x * blockIdx.x + threadIdx.x; if(thid<n){ if(in2[thid]==1) out[thid] = in[thid]; } }
a7e2b3acbe42fba745a86f8b4aeb7b497e28be86.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "gputimer.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>

using namespace std;

__device__ int getGlobalIdx_2D_2D() {
	int blockId = blockIdx.x + blockIdx.y * gridDim.x;
	int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x;
	return threadId;
}

__device__ int getGlobalIdx_1D_1D() {
	return blockIdx.x * blockDim.x + threadIdx.x;
}

__global__ void transpose_serial(float *a, float *b, int N) {
	for (int j=0; j<N; j++) {
		for (int i=0; i<N; i++) {
			b[j+i*N] = a[i+j*N];
		}
	}
}

__global__ void transpose_row_paralel(float *a, float *b, int N) {
	int i = threadIdx.x;
	for (int j=0; j<N; j++) {
		b[j+i*N] = a[i+j*N];
	}
}

__global__ void transpose_element_paralel(float *a, float *b, int N) {
	// absolute (real) 2D location
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	int j = threadIdx.y + blockDim.y * blockIdx.y;
	b[j+i*N] = a[i+j*N];
}

void test(int g1, int g2, int b1, int b2, int kernel, int matrixSize, bool showOutput) {
	float *h_a, *h_b;
	float *d_a, *d_b;
	int bytes = matrixSize * matrixSize * sizeof(float);
	GpuTimer timer;

	h_a = (float*) malloc(bytes);
	h_b = (float*) malloc(bytes);
	hipMalloc((void **) &d_a, bytes);
	hipMalloc((void **) &d_b, bytes);

	// init host arrays
	srand(time(NULL));
	for (int i=0; i<matrixSize; i++) {
		for (int j=0; j<matrixSize; j++) {
			h_a[i*matrixSize + j] = 10*i+j;
			h_b[i*matrixSize + j] = 0;
		}
	}

	// init gpu arrays
	hipMemset(d_a, 0, bytes);
	hipMemset(d_b, 0, bytes);

	// copy to gpu
	hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

	// kernel call
	if (kernel == 0) {
		timer.Start();
		hipLaunchKernelGGL(( transpose_serial), dim3(dim3(g1, g2)), dim3(dim3(b1, b2)), 0, 0, d_a, d_b, matrixSize);
		timer.Stop();
	} else if (kernel == 1) {
		timer.Start();
		hipLaunchKernelGGL(( transpose_row_paralel), dim3(dim3(g1, g2)), dim3(dim3(b1, b2)), 0, 0, d_a, d_b, matrixSize);
		timer.Stop();
	} else if (kernel == 2) {
		timer.Start();
		hipLaunchKernelGGL(( transpose_element_paralel), dim3(dim3(g1, g2)), dim3(dim3(b1, b2)), 0, 0, d_a, d_b, matrixSize);
		timer.Stop();
	}

	cout << timer.Elapsed() << "\t<<<dim3(" << g1 << ", " << g2 << "), dim3(" << b1 << ", " << b2 << ")>>>\n";

	// copy to host
	hipMemcpy(h_b, d_b, bytes, hipMemcpyDeviceToHost);

	if (showOutput) {
		for (int i=0; i<matrixSize; i++) {
			for (int j=0; j<matrixSize; j++) {
				cout << " " << h_b[i*matrixSize + j];
			}
			cout << endl;
		}
		cout << "\n";
	}

	hipFree(d_a);
	hipFree(d_b);
}

void test1(int g1, int g2, int b1, int b2, int matrixSize, bool showOutput) {
	float *h_a, *d_a;
	float *h_b, *d_b;
	int bytes = matrixSize * matrixSize * sizeof(float);
	GpuTimer timer;

	h_a = (float*) malloc(bytes);
	h_b = (float*) malloc(bytes);
	hipMalloc((void **) &d_a, bytes);
	hipMalloc((void **) &d_b, bytes);

	// init host arrays
	srand(time(NULL));
	for (int i=0; i<matrixSize; i++) {
		for (int j=0; j<matrixSize; j++) {
			h_a[i*matrixSize + j] = 10*i+j;
			h_b[i*matrixSize + j] = 0;
		}
	}

	// init gpu arrays
	hipMemset(d_a, 0, bytes);
	hipMemset(d_b, 0, bytes);

	// copy to gpu
	hipMemcpy(d_a, h_a, bytes, hipMemcpyHostToDevice);
	hipMemcpy(d_b, h_b, bytes, hipMemcpyHostToDevice);

	// kernel call
	timer.Start();
	hipLaunchKernelGGL(( transpose_element_paralel), dim3(dim3(g1, g2)), dim3(dim3(b1, b2)), 0, 0, d_a, d_b, matrixSize);
	timer.Stop();

	cout << timer.Elapsed() << "\t<<<dim3(" << g1 << ", " << g2 << "), dim3(" << b1 << ", " << b2 << ")>>>\n";

	// copy to host
	hipMemcpy(h_b, d_b, bytes, hipMemcpyDeviceToHost);

	if (showOutput) {
		for (int i=0; i<matrixSize; i++) {
			for (int j=0; j<matrixSize; j++) {
				cout << " " << h_b[i*matrixSize + j];
			}
			cout << endl;
		}
		cout << "\n";
	}

	hipFree(d_a);
	hipFree(d_b);
}

int main() {
	cout << "Matrix size: 10x10" << "\n";
	test(1, 1, 1, 1, 0, 10, true);       // serial kernel: a single thread does all the work
	cout << "\n";
	test(1, 1, 10, 1, 1, 10, true);      // row-parallel kernel: one thread per row
	cout << "\n";
	test1(5, 5, 2, 2, 10, true);         // different test procedure
	cout << "\n";

	cout << "\nMatrix size: 40x40" << "\n";
	test(1, 1, 1, 1, 0, 40, false);
	test(1, 1, 40, 1, 1, 40, false);
	test1(10, 10, 4, 4, 40, false);      // different test procedure
	cout << "\n";

	cout << "\nMatrix size: 100x100" << "\n";
	test(1, 1, 1, 1, 0, 100, false);
	test(1, 1, 100, 1, 1, 100, false);
	test1(25, 25, 4, 4, 100, false);     // different test procedure
	cout << "\n";

	cout << "\nMatrix size: 1024x1024" << "\n";
	test(1, 1, 1, 1, 0, 1024, false);
	test(1, 1, 1024, 1, 1, 1024, false);
	test1(128, 128, 8, 8, 1024, false);  // different test procedure
	test1(32, 32, 32, 32, 1024, false);  // different test procedure
	cout << "\n";

	getchar();  // pause before exit
	return 0;
}
a7e2b3acbe42fba745a86f8b4aeb7b497e28be86.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "gputimer.h" #include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <iostream> using namespace std; __device__ int getGlobalIdx_2D_2D() { int blockId = blockIdx.x + blockIdx.y * gridDim.x; int threadId = blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x; return threadId; } __device__ int getGlobalIdx_1D_1D() { return blockIdx.x *blockDim.x + threadIdx.x; } __global__ void transpose_serial(float *a, float *b, int N) { for (int j=0; j<N; j++) { for (int i=0; i<N; i++) { b[j+i*N] = a[i + j*N]; } } } __global__ void transpose_row_paralel(float *a, float *b, int N) { int i = threadIdx.x; for (int j=0; j<N; j++) { b[j+i*N] = a[i+j*N]; } } __global__ void transpose_element_paralel(float *a, float *b, int N) { // absolute (real) 2D location int i = threadIdx.x + blockDim.x * blockIdx.x; int j = threadIdx.y + blockDim.y * blockIdx.y; b[j+i*N] = a[i+j*N]; } void test(int g1, int g2, int b1, int b2, int kernel, int matrixSize, bool showOutput) { float *h_a, *h_b; float *d_a, *d_b; int bytes = matrixSize * matrixSize * sizeof(float); GpuTimer timer; h_a = (float*) malloc(bytes); h_b = (float*) malloc(bytes); cudaMalloc((void **) &d_a, bytes); cudaMalloc((void **) &d_b, bytes); // init host arrays srand(time(NULL)); for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { h_a[i*matrixSize + j] = 10*i+j; h_b[i*matrixSize + j] = 0; } } // init gpu arrays cudaMemset(d_a, 0, bytes); cudaMemset(d_b, 0, bytes); // copy to gpu cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); // kernel call if (kernel == 0) { timer.Start(); transpose_serial<<<dim3(g1, g2), dim3(b1, b2)>>>(d_a, d_b, matrixSize); timer.Stop(); } else if (kernel == 1) { timer.Start(); transpose_row_paralel<<<dim3(g1, g2), dim3(b1, b2)>>>(d_a, d_b, matrixSize); timer.Stop(); } else if (kernel == 2) { timer.Start(); transpose_element_paralel<<<dim3(g1, g2), dim3(b1, b2)>>>(d_a, d_b, matrixSize); timer.Stop(); } cout << timer.Elapsed() << "\t<<<" << g << ", " << b << ">>> " << "\n"; // copy to host cudaMemcpy(h_b, d_b, bytes, cudaMemcpyDeviceToHost); if (showOutput) { for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { cout << " " << h_b[i*matrixSize + j]; } cout << endl; } cout << "\n"; } cudaFree(d_a); cudaFree(d_b); } void test1(int g1, int g2, int b1, int b2, int matrixSize, bool showOutput) { float *h_a, *d_a; float *h_b, *d_b; int bytes = matrixSize * matrixSize * sizeof(float); GpuTimer timer; h_a = (float*) malloc(bytes); h_b = (float*) malloc(bytes); cudaMalloc((void **) &d_a, bytes); cudaMalloc((void **) &d_b, bytes); // init host arrays srand(time(NULL)); for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { h_a[i*matrixSize + j] = 10*i+j; h_b[i*matrixSize + j] = 0; } } // init gpu arrays cudaMemset(d_a, 0, bytes); cudaMemset(d_b, 0, bytes); // copy to gpu cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice); // kernel call timer.Start(); transpose_element_paralel<<<dim3(g1, g2), dim3(b1, b2)>>>(d_a, d_b, matrixSize); timer.Stop(); cout << timer.Elapsed() << "\t<<<dim3(" << g1 << ", " << g2 << "), dim3(" << b1 << ", " << b2 << ")>>>\n"; // copy to host cudaMemcpy(h_b, d_b, bytes, cudaMemcpyDeviceToHost); if (showOutput) { for (int i=0; i<matrixSize; i++) { for (int j=0; j<matrixSize; j++) { cout << " " << h_b[i*matrixSize + j]; } cout << endl; } cout << 
"\n"; } cudaFree(d_a); cudaFree(d_b); } int main() { cout << "Matrix size: 10x10" << "\n"; test(1, 1, 0, 10, true); cout << "\n"; test(1, 10, 1, 10, true); cout << "\n"; test1(5, 5, 2, 2, 10, true); // different test procedure cout << "\n"; cout << "\nMatrix size: 40x40" << "\n"; test(1, 1, 0, 40, false); test(1, 40, 1, 40, false); test1(10, 10, 4, 4, 40, false); // different test procedure cout << "\n"; cout << "\nMatrix size: 100x100" << "\n"; test(1, 1, 0, 100, false); test(1, 100, 1, 100, false); test1(25, 25, 4, 4, 100, false); // different test procedure cout << "\n"; cout << "\nMatrix size: 1024x1024" << "\n"; test(1, 1, 0, 1024, false); test(1, 1024, 1, 1024, false); test1(128, 128, 8, 8, 1024, false); // different test procedure test1(32, 32, 32, 32, 1024, false); // different test procedure cout << "\n"; scanf("%d", NULL); return 0; }
9b573a8f12317c3232fde1280e51382e2f96b14c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // The CUDA routines below are by Danny Ruijters, adapted by Jonathan Williams. // External files also required and included. /*--------------------------------------------------------------------------*\ Copyright (c) 2008-2010, Danny Ruijters. All rights reserved. http://www.dannyruijters.nl/cubicinterpolation/ This file is part of CUDA Cubic B-Spline Interpolation (CI). Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the copyright holders nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. The views and conclusions contained in the software and documentation are those of the authors and should not be interpreted as representing official policies, either expressed or implied. When using this code in a scientific project, please cite one or all of the following papers: * Daniel Ruijters and Philippe Thvenaz, GPU Prefilter for Accurate Cubic B-Spline Interpolation, The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012. http://dannyruijters.nl/docs/cudaPrefilter3.pdf * Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens, Efficient GPU-Based Texture Interpolation using Uniform B-Splines, Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008. \*--------------------------------------------------------------------------*/ #include <stdio.h> #include <cubicTex2D.cu> #include "math_func.cu" #include <cubicPrefilter2D.cu> texture<float, 2, hipReadModeElementType> coeffs; //2D texture //-------------------------------------------------------------------------- // CUDA kernel. This is what is actually run on the GPU. // GPUs are broken down in to streaming multiprocessors, each one of which // contains multiple execution units, registers, memory/caches, etc. // Logically, CUDA programs are broken down into units called blocks. // A given block contains multiple threads that are executed simultaneously // each one on an execution unit), and a given block must fit into a single // streaming multiprocessor. // Part of the job of a streaming multiprocessor is to schedule the // execution of blocks. So a given CUDA kernel can contain many more threads // than a GPU has execution units. 
//-------------------------------------------------------------------------- // deviceImagePtrNew: the output array // width: the widht of the raster // mPlyer: the interpolation multiplier // rasMax, rasMin: limits on the output value __global__ void interpolate_kernel(float* deviceImagePtrNew, uint width, float mPlyer, float rasMax, float rasMin) { // Find the index i of the array element this thread will put its output into. // The threads in this kernel are organized into two dimensions, even though // the output array is one-dimensional. uint x = blockIdx.x * blockDim.x + threadIdx.x; uint y = blockIdx.y * blockDim.y + threadIdx.y; uint i = y * width + x; // mPlyer is used to subdivide the existing raster into smaller units for interpolation. float x0 = (float)x; float y0 = (float)y; float x1 = x0/mPlyer; float y1 = y0/mPlyer; deviceImagePtrNew[i] = fminf(fmaxf(cubicTex2D(coeffs, x1, y1),rasMin),rasMax); } //-------------------------------------------------------------------------- // Copy floating point data from and to the GPU //-------------------------------------------------------------------------- //! Allocate GPU memory and copy a voxel volume from CPU to GPU memory //! @return the pitched pointer to the GPU copy of the voxel volume //! @param host pointer to the voxel volume in CPU (host) memory //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! @param depth volume depth in number of voxels extern "C" hipPitchedPtr CopyVolumeHostToDevice(const float* hostImagePtr, uint width, uint height, uint depth) { hipPitchedPtr deviceImagePPtr = {0}; const hipExtent extent = make_hipExtent(width * sizeof(float), height, depth); hipMalloc3D(&deviceImagePPtr, extent); hipMemcpy3DParms p = {0}; p.srcPtr = make_hipPitchedPtr((void*)hostImagePtr, width * sizeof(float), width, height); p.dstPtr = deviceImagePPtr; p.extent = extent; p.kind = hipMemcpyHostToDevice; hipMemcpy3D(&p); return deviceImagePPtr; } //! Copy a voxel volume from GPU to CPU memory, and free the GPU memory //! @param host pointer to the voxel volume copy in CPU (host) memory //! @param device pitched pointer to the voxel volume in GPU (device) memory //! @param width volume width in number of voxels //! @param height volume height in number of voxels //! @param depth volume depth in number of voxels //! @note The \host CPU memory should be pre-allocated extern "C" void CopyVolumeDeviceToHost(float* hostImagePtr, const hipPitchedPtr deviceImagePPtr, uint width, uint height, uint depth) { const hipExtent extent = make_hipExtent(width * sizeof(float), height, depth); hipMemcpy3DParms p = {0}; p.srcPtr = deviceImagePPtr; p.dstPtr = make_hipPitchedPtr((void*)hostImagePtr, width * sizeof(float), width, height); p.extent = extent; p.kind = hipMemcpyDeviceToHost; hipMemcpy3D(&p); hipFree(deviceImagePPtr.ptr); //free the GPU volume } // This function executes the first two of three kernels on the GPU. // The prefilter is used to alter the input array such that the output interpolated array always goes // through the original raster values. If we didn't do this, the output would be smoothed overall. // Using the prefilter ends up sharpening the output. 
template<class floatN> extern void CubicBSplinePrefilter2D(hipPitchedPtr deviceImagePPtr, uint width, uint height) { dim3 dimBlockX(max(min(PowTwoDivider(height), 64), 1024)); dim3 dimGridX(height / dimBlockX.x); hipLaunchKernelGGL(( SamplesToCoefficients2DX<floatN>), dim3(dimGridX), dim3(dimBlockX), 0, 0, deviceImagePPtr.ptr, deviceImagePPtr.pitch, width, height); dim3 dimBlockY(max(min(PowTwoDivider(width), 64), 1024)); dim3 dimGridY(width / dimBlockY.x); hipLaunchKernelGGL(( SamplesToCoefficients2DY<floatN>), dim3(dimGridY), dim3(dimBlockY), 0, 0, deviceImagePPtr.ptr, deviceImagePPtr.pitch, width, height); } // This algorithm makes use of the texture memory and manipulation capabilities of the GPU. // initTexture takes the input finds relevant spline interpolation coefficients, and puts them // into texture memory on the GPU. extern "C" void initTexture(hipPitchedPtr deviceImagePPtr, uint width, uint height) { // Create the B-spline coefficients texture hipChannelFormatDesc channelDescCoeff = hipCreateChannelDesc<float>(); hipArray *coeffArray = 0; hipMallocArray(&coeffArray, &channelDescCoeff, width, height); hipMemcpy2DToArray(coeffArray, 0, 0, deviceImagePPtr.ptr, deviceImagePPtr.pitch, width * sizeof(float), height, hipMemcpyDeviceToDevice); hipBindTextureToArray(coeffs, coeffArray, channelDescCoeff); coeffs.normalized = false; // access with normalized texture coordinates coeffs.filterMode = hipFilterModeLinear; } // The interpolate function does the work of allocating output memory on the GPU and executing the // kernel. extern "C" hipPitchedPtr interpolate(uint width, uint height, float mPlyer, float rasMax, float rasMin) { // Allocate the output image float* deviceImagePtrNew; hipMalloc((void**)&deviceImagePtrNew, width * height * sizeof(float)); // here we calculate the appropriate number of threads (in two dimensions) per block. // A given generation of GPU has a limit on the number of threads per block. int bSizeX = min(PowTwoDivider(width), 16); int bSizeY = min(PowTwoDivider(height), 16); if (bSizeX * bSizeY > 1024) { bSizeX = 32; bSizeY = 32; } dim3 blockSize(bSizeX, bSizeY); dim3 gridSize(width / blockSize.x, height / blockSize.y); // Call the CUDA execution kernel hipLaunchKernelGGL(( interpolate_kernel), dim3(gridSize), dim3(blockSize), 0, 0, deviceImagePtrNew, width, mPlyer, rasMax, rasMin); //return a pointer to the resulting output on the GPU return make_hipPitchedPtr(deviceImagePtrNew, width * sizeof(float), width, height); } extern "C" void MyCubicBSplinePrefilter2D(hipPitchedPtr image, uint width, uint height) { return CubicBSplinePrefilter2D((float*)image.ptr, (uint)image.pitch, width, height); }
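Read together, the exported functions in this file form a small upsampling pipeline. An illustrative end-to-end sketch follows (HIP spelling to match this file; hostIn, hostOut, mPlyer, rasMin, and rasMax are hypothetical caller-supplied values, and interpolate() additionally assumes the output dimensions divide evenly by the block size it picks):

// Upsample a width x height raster by a factor of mPlyer.
hipPitchedPtr d_img = CopyVolumeHostToDevice(hostIn, width, height, 1);
MyCubicBSplinePrefilter2D(d_img, width, height);   // samples -> B-spline coefficients
initTexture(d_img, width, height);                 // copy coefficients into the 2D texture
hipFree(d_img.ptr);                                // the texture array now holds the data
uint outW = (uint)(width * mPlyer);
uint outH = (uint)(height * mPlyer);
hipPitchedPtr d_out = interpolate(outW, outH, mPlyer, rasMax, rasMin);
CopyVolumeDeviceToHost(hostOut, d_out, outW, outH, 1);  // also frees d_out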
9b573a8f12317c3232fde1280e51382e2f96b14c.cu
// The CUDA routines below are by Danny Ruijters, adapted by Jonathan Williams.
// External files also required and included.

/*--------------------------------------------------------------------------*\
Copyright (c) 2008-2010, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
*  Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
*  Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.
*  Neither the name of the copyright holders nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.

When using this code in a scientific project, please cite one or all of the
following papers:
*  Daniel Ruijters and Philippe Thévenaz,
   GPU Prefilter for Accurate Cubic B-Spline Interpolation,
   The Computer Journal, vol. 55, no. 1, pp. 15-20, January 2012.
   http://dannyruijters.nl/docs/cudaPrefilter3.pdf
*  Daniel Ruijters, Bart M. ter Haar Romeny, and Paul Suetens,
   Efficient GPU-Based Texture Interpolation using Uniform B-Splines,
   Journal of Graphics Tools, vol. 13, no. 4, pp. 61-69, 2008.
\*--------------------------------------------------------------------------*/

#include <stdio.h>
#include <cubicTex2D.cu>
#include "math_func.cu"
#include <cubicPrefilter2D.cu>

texture<float, 2, cudaReadModeElementType> coeffs;  // 2D texture

//--------------------------------------------------------------------------
// CUDA kernel. This is what is actually run on the GPU.
// GPUs are broken down into streaming multiprocessors, each of which
// contains multiple execution units, registers, memory/caches, etc.
// Logically, CUDA programs are broken down into units called blocks.
// A given block contains multiple threads that are executed simultaneously
// (each one on an execution unit), and a given block must fit into a single
// streaming multiprocessor.
// Part of the job of a streaming multiprocessor is to schedule the
// execution of blocks, so a given CUDA kernel can contain many more threads
// than a GPU has execution units.
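//--------------------------------------------------------------------------
// Worked example (editorial addition): with the 16x16 blocks chosen in
// interpolate() below, a 1024x1024 output raster is covered by a 64x64 grid,
// i.e. 4096 blocks of 256 threads each. That is far more blocks than any GPU
// has streaming multiprocessors, which is exactly the situation the hardware
// scheduler described above is designed to handle.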
//--------------------------------------------------------------------------
// deviceImagePtrNew: the output array
// width: the width of the raster
// mPlyer: the interpolation multiplier
// rasMax, rasMin: limits on the output value
__global__ void interpolate_kernel(float* deviceImagePtrNew, uint width, float mPlyer, float rasMax, float rasMin)
{
  // Find the index i of the array element this thread will put its output into.
  // The threads in this kernel are organized into two dimensions, even though
  // the output array is one-dimensional.
  uint x = blockIdx.x * blockDim.x + threadIdx.x;
  uint y = blockIdx.y * blockDim.y + threadIdx.y;
  uint i = y * width + x;

  // mPlyer is used to subdivide the existing raster into smaller units for interpolation.
  float x0 = (float)x;
  float y0 = (float)y;
  float x1 = x0 / mPlyer;
  float y1 = y0 / mPlyer;

  deviceImagePtrNew[i] = fminf(fmaxf(cubicTex2D(coeffs, x1, y1), rasMin), rasMax);
}

//--------------------------------------------------------------------------
// Copy floating point data from and to the GPU
//--------------------------------------------------------------------------

//! Allocate GPU memory and copy a voxel volume from CPU to GPU memory
//! @return the pitched pointer to the GPU copy of the voxel volume
//! @param host   pointer to the voxel volume in CPU (host) memory
//! @param width  volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth  volume depth in number of voxels
extern "C" cudaPitchedPtr CopyVolumeHostToDevice(const float* hostImagePtr, uint width, uint height, uint depth)
{
  cudaPitchedPtr deviceImagePPtr = {0};
  const cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth);
  cudaMalloc3D(&deviceImagePPtr, extent);
  cudaMemcpy3DParms p = {0};
  p.srcPtr = make_cudaPitchedPtr((void*)hostImagePtr, width * sizeof(float), width, height);
  p.dstPtr = deviceImagePPtr;
  p.extent = extent;
  p.kind = cudaMemcpyHostToDevice;
  cudaMemcpy3D(&p);
  return deviceImagePPtr;
}

//! Copy a voxel volume from GPU to CPU memory, and free the GPU memory
//! @param host   pointer to the voxel volume copy in CPU (host) memory
//! @param device pitched pointer to the voxel volume in GPU (device) memory
//! @param width  volume width in number of voxels
//! @param height volume height in number of voxels
//! @param depth  volume depth in number of voxels
//! @note The \host CPU memory should be pre-allocated
extern "C" void CopyVolumeDeviceToHost(float* hostImagePtr, const cudaPitchedPtr deviceImagePPtr, uint width, uint height, uint depth)
{
  const cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth);
  cudaMemcpy3DParms p = {0};
  p.srcPtr = deviceImagePPtr;
  p.dstPtr = make_cudaPitchedPtr((void*)hostImagePtr, width * sizeof(float), width, height);
  p.extent = extent;
  p.kind = cudaMemcpyDeviceToHost;
  cudaMemcpy3D(&p);
  cudaFree(deviceImagePPtr.ptr);  // free the GPU volume
}

// This function executes the first two of three kernels on the GPU.
// The prefilter alters the input array so that the interpolated output always passes
// through the original raster values. Without it, the output would be smoothed overall;
// applying the prefilter effectively sharpens the coefficients to compensate.
template<class floatN>
extern void CubicBSplinePrefilter2D(cudaPitchedPtr deviceImagePPtr, uint width, uint height)
{
  // The block size is capped at 64 threads; PowTwoDivider guarantees it divides the image extent.
  dim3 dimBlockX(min(PowTwoDivider(height), 64));
  dim3 dimGridX(height / dimBlockX.x);
  SamplesToCoefficients2DX<floatN><<<dimGridX, dimBlockX>>>(deviceImagePPtr.ptr, deviceImagePPtr.pitch, width, height);

  dim3 dimBlockY(min(PowTwoDivider(width), 64));
  dim3 dimGridY(width / dimBlockY.x);
  SamplesToCoefficients2DY<floatN><<<dimGridY, dimBlockY>>>(deviceImagePPtr.ptr, deviceImagePPtr.pitch, width, height);
}

// This algorithm makes use of the texture memory and manipulation capabilities of the GPU.
// initTexture takes the input, finds the relevant spline interpolation coefficients, and puts
// them into texture memory on the GPU.
extern "C" void initTexture(cudaPitchedPtr deviceImagePPtr, uint width, uint height)
{
  // Create the B-spline coefficients texture
  cudaChannelFormatDesc channelDescCoeff = cudaCreateChannelDesc<float>();
  cudaArray *coeffArray = 0;
  cudaMallocArray(&coeffArray, &channelDescCoeff, width, height);
  cudaMemcpy2DToArray(coeffArray, 0, 0, deviceImagePPtr.ptr, deviceImagePPtr.pitch, width * sizeof(float), height, cudaMemcpyDeviceToDevice);
  cudaBindTextureToArray(coeffs, coeffArray, channelDescCoeff);
  coeffs.normalized = false;  // access with unnormalized texture coordinates
  coeffs.filterMode = cudaFilterModeLinear;
}

// The interpolate function does the work of allocating output memory on the GPU and
// executing the kernel.
extern "C" cudaPitchedPtr interpolate(uint width, uint height, float mPlyer, float rasMax, float rasMin)
{
  // Allocate the output image
  float* deviceImagePtrNew;
  cudaMalloc((void**)&deviceImagePtrNew, width * height * sizeof(float));

  // Here we calculate the appropriate number of threads (in two dimensions) per block.
  // A given generation of GPU has a limit on the number of threads per block.
  int bSizeX = min(PowTwoDivider(width), 16);
  int bSizeY = min(PowTwoDivider(height), 16);
  if (bSizeX * bSizeY > 1024) { bSizeX = 32; bSizeY = 32; }
  dim3 blockSize(bSizeX, bSizeY);
  dim3 gridSize(width / blockSize.x, height / blockSize.y);

  // Call the CUDA execution kernel
  interpolate_kernel<<<gridSize, blockSize>>>(deviceImagePtrNew, width, mPlyer, rasMax, rasMin);

  // Return a pointer to the resulting output on the GPU
  return make_cudaPitchedPtr(deviceImagePtrNew, width * sizeof(float), width, height);
}

extern "C" void MyCubicBSplinePrefilter2D(cudaPitchedPtr image, uint width, uint height)
{
  // Forwards to the (pointer, pitch, width, height) overload provided by cubicPrefilter2D.cu.
  return CubicBSplinePrefilter2D((float*)image.ptr, (uint)image.pitch, width, height);
}
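//--------------------------------------------------------------------------
// Note (editorial addition): PowTwoDivider is supplied by the bundled
// math_func.cu. For the block/grid arithmetic above to divide evenly it must
// return the largest power of two that divides its argument; a plausible
// reference implementation (hypothetical name, kept separate so it cannot
// clash with the real one):
static uint PowTwoDividerRef(uint n)
{
  // The lowest set bit of n is the largest power-of-two divisor of n,
  // e.g. PowTwoDividerRef(96) == 32 and PowTwoDividerRef(512) == 512.
  return (n == 0) ? 0 : (n & (~n + 1u));
}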
3413f9daffb07de256d6d885518cc767f769d806.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <cmath>
#include <istream>
#include <hip/hip_runtime.h>  // hipify mapped both cuda.h and cuda_runtime.h here
#include <cassert>
#include <sys/stat.h>
#include <chrono>
#include <ctime>
#include <typeinfo>
#include <thrust/tuple.h>
#include <omp.h>
#include <tuple>

#define data_type double

using namespace std;

string part_input_file, part_out_name_base, vtk_out_name_base;
data_type timestep_length, time_end, epsilon, sigma, part_out_freq, vtk_out_freq, cl_workgroup_1dsize;
unsigned out_count = 0, vtk_count = 0, x_n, y_n, z_n, cell_size;

void checkError(hipError_t err)
{
  if (err != hipSuccess) {
    std::cout << hipGetErrorString(err) << std::endl;
    exit(-1);
  }
}

//************************************** Structs for variables **************************************************
template <typename T>
struct particle {
  T x; T y; T z;
  T vx; T vy; T vz;
  T m;
  T force_x; T force_y; T force_z;
  int cell_num;
};

template <typename T>
struct Domain {
  T x_min; T x_max;
  T y_min; T y_max;
  T z_min; T z_max;
  unsigned int x_n; unsigned int y_n; unsigned int z_n;
  T r_cut;
  T x_len; T y_len; T z_len;
  int cell_size;
  int thread_size;
  int block_size;
  int cpu_x = 20;
  T max_vel = 48;  // default value; either read from the par file or this value is used
};

Domain<data_type> domain;

//************************************** Input Parameters from Par **************************************************
void input_para(string name)
{
  string testline;
  string::size_type sz;
  ifstream Input(name);
  if (!Input) { cout << "There was a problem opening the file. Press any key to close.\n"; }
  while (Input.good()) {
    getline(Input, testline, ' ');
    if (testline != "") {
      if (testline == "part_input_file") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        part_input_file = testline;
      }
      else if (testline == "timestep_length") { getline(Input, testline); timestep_length = stof(testline, &sz); }
      else if (testline == "time_end")        { getline(Input, testline); time_end = stof(testline, &sz); }
      else if (testline == "epsilon")         { getline(Input, testline); epsilon = stof(testline, &sz); }
      else if (testline == "sigma")           { getline(Input, testline); sigma = stof(testline, &sz); }
      else if (testline == "part_out_freq")   { getline(Input, testline); part_out_freq = stof(testline, &sz); }
      else if (testline == "part_out_name_base") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        part_out_name_base = testline;
      }
      else if (testline == "vtk_out_freq")    { getline(Input, testline); vtk_out_freq = stof(testline, &sz); }
      else if (testline == "vtk_out_name_base") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        vtk_out_name_base = testline;
      }
      else if (testline == "cl_workgroup_1dsize")   { getline(Input, testline); domain.thread_size = stoi(testline); }
      else if (testline == "cl_workgroup_3dsize_x") { getline(Input, testline); }  // read and ignored
      else if (testline == "cl_workgroup_3dsize_y") { getline(Input, testline); }
      else if (testline == "cl_workgroup_3dsize_z") { getline(Input, testline); }
      else if (testline == "x_min")   { getline(Input, testline); domain.x_min = stof(testline, &sz); }
      else if (testline == "y_min")   { getline(Input, testline); domain.y_min = stof(testline, &sz); }
      else if (testline == "z_min")   { getline(Input, testline); domain.z_min = stof(testline, &sz); }
      else if (testline == "x_max")   { getline(Input, testline); domain.x_max = stof(testline, &sz); }
      else if (testline == "y_max")   { getline(Input, testline); domain.y_max = stof(testline, &sz); }
      else if (testline == "z_max")   { getline(Input, testline); domain.z_max = stof(testline, &sz); }
      else if (testline == "r_cut")   { getline(Input, testline); domain.r_cut = stof(testline, &sz); }
      else if (testline == "x_n")     { getline(Input, testline); domain.x_n = stoul(testline, nullptr, 0); }
      else if (testline == "y_n")     { getline(Input, testline); domain.y_n = stoul(testline, nullptr, 0); }
      else if (testline == "z_n")     { getline(Input, testline); domain.z_n = stoul(testline, nullptr, 0); }
      else if (testline == "max_vel") { getline(Input, testline); domain.max_vel = stof(testline, &sz); }
      else if (testline == "cpu_x")   { getline(Input, testline); domain.cpu_x = stof(testline, &sz); }
    }
  }

  domain.x_len = (domain.x_max - domain.x_min)/domain.x_n;
  domain.y_len = (domain.y_max - domain.y_min)/domain.y_n;
  domain.z_len = (domain.z_max - domain.z_min)/domain.z_n;
  domain.cell_size = ((domain.x_max - domain.x_min)*(domain.y_max - domain.y_min)*(domain.z_max - domain.z_min))/(domain.x_len*domain.y_len*domain.z_len);
  domain.cpu_x = int(domain.x_n*domain.cpu_x/100);

  /*cout<<"part_input_file : "<<part_input_file<<"\t"<<part_input_file.size()<<endl;
  cout<<"timestep_length : "<<timestep_length<<endl;
  cout<<"time_end : "<<time_end<<endl;
  cout<<"epsilon : "<<epsilon<<endl;
  cout<<"sigma : "<<sigma<<endl;
  cout<<"part_out_freq : "<<part_out_freq<<endl;
  cout<<"part_out_name_base : "<<part_out_name_base<<"\t"<<part_out_name_base.size()<<endl;
  cout<<"vtk_out_freq : "<<vtk_out_freq<<endl;
  cout<<"vtk_out_name_base : "<<vtk_out_name_base<<"\t"<<vtk_out_name_base.size()<<endl;
  cout<<"x_min: "<<domain.x_min<<endl;
  cout<<"y_min: "<<domain.y_min<<endl;
  cout<<"z_min: "<<domain.z_min<<endl;
  cout<<"x_max: "<<domain.x_max<<endl;
  cout<<"y_max: "<<domain.y_max<<endl;
  cout<<"z_max: "<<domain.z_max<<endl;
  cout<<"x_n: "<<domain.x_n<<endl;
  cout<<"y_n: "<<domain.y_n<<endl;
  cout<<"z_n: "<<domain.z_n<<endl;
  cout<<"r_cut: "<<domain.r_cut<<endl;
  cout<<"max_vel: "<<domain.max_vel<<endl;
  cout<<"cpu_x: "<<domain.cpu_x<<endl;*/

  struct stat st;
  if (stat(&part_out_name_base[0], &st) != 0) {
    const int dir_err = mkdir(&part_out_name_base[0], S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    if (-1 == dir_err) { printf("Error creating directory!\n"); exit(1); }
  }
  string subName = part_out_name_base + "/Particle_parallel2_";
  if (stat(&subName[0], &st) != 0) {
    const int dir_err = mkdir(&subName[0], S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    if (-1 == dir_err) { printf("Error creating directory!\n"); exit(1); }
  }
}

//************************************** Initializing Particles **************************************************
template <typename T>
__global__ void init_particle(particle<T> *elements, int num_par)
{
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  while (id < num_par) {
    elements[id].cell_num = id;
    id = id + blockDim.x * gridDim.x;
  }
}

template <typename T>
__host__ tuple<int,int> init_cpu_gpu(particle<T> *elements, Domain<T> domain, int num_par, int *cpu_par, int *gpu_par)
{
  int cpu_count = 0, gpu_count = 0;
  for (int i = 0; i < num_par; ++i) {
    int x_cell = (elements[i].x - domain.x_min)/domain.x_len;
    cpu_par[i] = -1;
    gpu_par[i] = -1;
    if (x_cell > domain.cpu_x) { gpu_par[gpu_count] = i; gpu_count++; }
    else                       { cpu_par[cpu_count] = i; cpu_count++; }
  }
  return make_tuple(cpu_count, gpu_count);
}
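//--------------------------------------------------------------------------
// Illustrative note (editorial addition): init_cpu_gpu splits the particles
// by their x-cell index, so cells 0..cpu_x form a slab handled by the OpenMP
// host path while all cells to the right go to the GPU kernel. For example,
// with x_n = 10 and cpu_x = 2, the three leftmost slabs run on the CPU and
// the remaining seven on the GPU. A partition sanity check might look like
// this (hypothetical helper, not called anywhere in the original program):
template <typename T>
bool check_partition(particle<T> *elements, Domain<T> dom, int num_par,
                     int *cpu_par, int *gpu_par, int cpu_count, int gpu_count)
{
  if (cpu_count + gpu_count != num_par) return false;  // every particle assigned exactly once
  for (int k = 0; k < cpu_count; ++k) {
    int x_cell = (elements[cpu_par[k]].x - dom.x_min) / dom.x_len;
    if (x_cell > dom.cpu_x) return false;              // CPU list must stay left of the split
  }
  for (int k = 0; k < gpu_count; ++k) {
    int x_cell = (elements[gpu_par[k]].x - dom.x_min) / dom.x_len;
    if (x_cell <= dom.cpu_x) return false;             // GPU list must stay right of it
  }
  return true;
}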
//************************************** Initializing & updating cell ************************************************** template <typename T> __global__ void init_cell (Domain<T> domain, int *cell_arr) { int id = threadIdx.x + blockIdx.x * blockDim.x; while(id<domain.cell_size) { cell_arr[id] = -1; id = id + blockDim.x * gridDim.x; } } template <typename T> __device__ void update_cell_calc (particle<T> *elements, int id, Domain<T> domain, int *cell_arr) { int x_cell = (elements[id].x-domain.x_min)/domain.x_len; int y_cell = (elements[id].y-domain.y_min)/domain.y_len; int z_cell = (elements[id].z-domain.z_min)/domain.z_len; int cell_count = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n; elements[id].cell_num = atomicExch(&cell_arr[cell_count], elements[id].cell_num); } template <typename T> __global__ void update_cell (particle<T> *elements, int num_par, Domain<T> domain, int *cell_arr) { int id = threadIdx.x + blockIdx.x * blockDim.x; while(id<num_par) { update_cell_calc<T>(elements, id, domain, cell_arr); id = id + blockDim.x * gridDim.x; } } //************************************** Force Calculation using Lenards ************************************************** template <typename T> __device__ thrust::tuple<T,T,T> force ( T dist_x1, T dist_y1, T dist_z1, particle<T> *elements, T epsilon, T sigma, int id , int curr_pos, int coord, Domain<T> domain, int *cell_arr) { T result_x = 0,result_y = 0,result_z = 0; T x,y,z, mag; int temp = id; int curr_z = temp/(domain.x_n*domain.y_n); temp -= curr_z*domain.x_n*domain.y_n; int curr_y = temp/domain.x_n; int curr_x = temp % domain.x_n; for(int p = curr_x-1; p<curr_x+2; ++p) { for(int q = curr_y-1; q<curr_y+2; ++q) { for(int r = curr_z-1; r<curr_z+2; ++r) { if ( p >= 0 && p < domain.x_n && q >= 0 && q < domain.y_n && r >= 0 && r < domain.z_n) { int cell_index = p + q*domain.y_n + r*domain.y_n*domain.x_n; int i = cell_arr[cell_index]; while (i != -1) { if (i != curr_pos) { if( dist_x1 < elements[i].x) { if( abs(dist_x1 - elements[i].x) <= (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x))) x = dist_x1 - elements[i].x ; else x = (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x)); } else { if(abs(elements[i].x - dist_x1) <= (abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min))) x = dist_x1 - elements[i].x ; else x = -(abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min)); } if( dist_y1 < elements[i].y) { if( abs(dist_y1 - elements[i].y) <= (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y))) y = dist_y1 - elements[i].y ; else y = (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y)); } else { if(abs(elements[i].y - dist_y1) <= (abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min))) y = dist_y1 - elements[i].y ; else y = -(abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min)); } if( dist_z1 < elements[i].z) { if( abs(dist_z1 - elements[i].z) <= (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z))) z = dist_z1 - elements[i].z ; else z = (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z)); } else { if(abs(elements[i].z - dist_z1) <= (abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min))) z = dist_z1 - elements[i].z ; else z = -(abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min)); } // magnitude of the distance vector mag = sqrt(pow(x,2)+ pow(y,2) + pow(z,2)); if(mag<=domain.r_cut) { result_x= result_x + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * x )); result_y= result_y + 
(((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * y )); result_z= result_z + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * z )); } } i = elements[i].cell_num; } } } } } return thrust::make_tuple(result_x,result_y,result_z) ; } //************************************** Initialization of Forces ************************************************** template <typename T> __global__ void init_force ( particle<T> *elements, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr) { int curr_pos = threadIdx.x + blockIdx.x * blockDim.x; while( curr_pos < num_par) { thrust::tuple<T,T,T> result; int x_cell = (elements[curr_pos].x-domain.x_min)/domain.x_len; int y_cell = (elements[curr_pos].y-domain.y_min)/domain.y_len; int z_cell = (elements[curr_pos].z-domain.z_min)/domain.z_len; int id = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n; result = force<T> (elements[curr_pos].x, elements[curr_pos].y, elements[curr_pos].z, elements, epsilon, sigma, id, curr_pos, 1, domain, cell_arr); elements[curr_pos].force_x = thrust::get<0>(result); elements[curr_pos].force_y = thrust::get<1>(result); elements[curr_pos].force_z = thrust::get<2>(result); curr_pos = curr_pos + blockDim.x * gridDim.x; } } //************************************** Kernel to calculate force ************************************************** template <typename T> __global__ void main_kernel_force (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr, int *gpu_par, int gpu_count ) { int idg = threadIdx.x + blockIdx.x * blockDim.x; T force_x_o, force_y_o, force_z_o; while( idg < gpu_count) { int id = gpu_par[idg]; if(id != -1) { int x_cell = (elements[id].x-domain.x_min)/domain.x_len; int y_cell = (elements[id].y-domain.y_min)/domain.y_len; int z_cell = (elements[id].z-domain.z_min)/domain.z_len; int idc = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n; thrust::tuple<T,T,T> result; force_x_o = elements[id].force_x; force_y_o = elements[id].force_y; force_z_o = elements[id].force_z; result = force<T> (elements[id].x, elements[id].y, elements[id].z, elements, epsilon, sigma, idc, id, 1, domain, cell_arr); // Shooting elements[id].force_x = thrust::get<0>(result); elements[id].force_y = thrust::get<1>(result); elements[id].force_z = thrust::get<2>(result); elements[id].vx = elements[id].vx + ((force_x_o+elements[id].force_x)*delta_t/(2*elements[id].m)); if(abs(elements[id].vx)>domain.max_vel) elements[id].vx = domain.max_vel*(elements[id].vx/abs(elements[id].vx)); elements[id].vy = elements[id].vy + ((force_y_o+elements[id].force_y)*delta_t/(2*elements[id].m)); if(abs(elements[id].vy)>domain.max_vel) elements[id].vy = domain.max_vel*(elements[id].vy/abs(elements[id].vy)); elements[id].vz = elements[id].vz + ((force_z_o+elements[id].force_z)*delta_t/(2*elements[id].m)); if(abs(elements[id].vz)>domain.max_vel) elements[id].vz = domain.max_vel*(elements[id].vz/abs(elements[id].vz)); } idg = idg + blockDim.x * gridDim.x; } } //************************************** Host to calculate force ************************************************** template <typename T> __host__ void main_host_force (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr, int *cpu_par, int cpu_count ) { #pragma omp parallel for for(int k=0 ; k < cpu_count; ++k) { int j = cpu_par[k]; if(j != -1) { T force_x_o, force_y_o, force_z_o; int x_cell = 
(elements[j].x-domain.x_min)/domain.x_len; int y_cell = (elements[j].y-domain.y_min)/domain.y_len; int z_cell = (elements[j].z-domain.z_min)/domain.z_len; int idc = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n; force_x_o = elements[j].force_x; force_y_o = elements[j].force_y; force_z_o = elements[j].force_z; T result_x = 0; T result_y = 0; T result_z = 0; T x,y,z, mag; T dist_x1 = elements[j].x; T dist_y1 = elements[j].y; T dist_z1 = elements[j].z; int temp = idc; int curr_z = temp/(domain.x_n*domain.y_n); temp -= curr_z*domain.x_n*domain.y_n; int curr_y = temp/domain.x_n; int curr_x = temp % domain.x_n; for(int p = curr_x-1; p<curr_x+2; ++p) { for(int q = curr_y-1; q<curr_y+2; ++q) { for(int r = curr_z-1; r<curr_z+2; ++r) { if ( p >= 0 && p < domain.x_n && q >= 0 && q < domain.y_n && r >= 0 && r < domain.z_n) { int cell_index = p + q*domain.y_n + r*domain.y_n*domain.x_n; int i = cell_arr[cell_index]; while (i != -1) { if (i != j) { if( dist_x1 < elements[i].x) { if( abs(dist_x1 - elements[i].x) <= (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x))) x = dist_x1 - elements[i].x ; else x = (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x)); } else { if(abs(elements[i].x - dist_x1) <= (abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min))) x = dist_x1 - elements[i].x ; else x = -(abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min)); } if( dist_y1 < elements[i].y) { if( abs(dist_y1 - elements[i].y) <= (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y))) y = dist_y1 - elements[i].y ; else y = (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y)); } else { if(abs(elements[i].y - dist_y1) <= (abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min))) y = dist_y1 - elements[i].y ; else y = -(abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min)); } if( dist_z1 < elements[i].z) { if( abs(dist_z1 - elements[i].z) <= (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z))) z = dist_z1 - elements[i].z ; else z = (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z)); } else { if(abs(elements[i].z - dist_z1) <= (abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min))) z = dist_z1 - elements[i].z ; else z = -(abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min)); } // magnitude of the distance vector mag = sqrt(pow(x,2)+ pow(y,2) + pow(z,2)); if(mag<=domain.r_cut) { result_x= result_x + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * x )); result_y= result_y + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * y )); result_z= result_z + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * z )); } } i = elements[i].cell_num; } } } } } elements[j].force_x = result_x; elements[j].force_y = result_y; elements[j].force_z = result_z; elements[j].vx = elements[j].vx + ((force_x_o+elements[j].force_x)*delta_t/(2*elements[j].m)); if(abs(elements[j].vx)>domain.max_vel) elements[j].vx = domain.max_vel*(elements[j].vx/abs(elements[j].vx)); elements[j].vy = elements[j].vy + ((force_y_o+elements[j].force_y)*delta_t/(2*elements[j].m)); if(abs(elements[j].vy)>domain.max_vel) elements[j].vy = domain.max_vel*(elements[j].vy/abs(elements[j].vy)); elements[j].vz = elements[j].vz + ((force_z_o+elements[j].force_z)*delta_t/(2*elements[j].m)); if(abs(elements[j].vz)>domain.max_vel) elements[j].vz = domain.max_vel*(elements[j].vz/abs(elements[j].vz)); } } } 
//************************************** kernel to calculate distance ************************************************** template <typename T> __global__ void main_kernel_dist (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr ) { int id = threadIdx.x + blockIdx.x * blockDim.x; while( id < num_par) { elements[id].x = elements[id].x + (delta_t * elements[id].vx) + ( (elements[id].force_x * delta_t * delta_t)/(2*elements[id].m)); if( elements[id].x >= domain.x_max ) { elements[id].x = elements[id].x - (domain.x_max-domain.x_min); } else if (elements[id].x <= domain.x_min) { elements[id].x = (domain.x_max-domain.x_min) + elements[id].x; } elements[id].y = elements[id].y + (delta_t * elements[id].vy) + ( (elements[id].force_y * delta_t * delta_t)/(2*elements[id].m)); if( elements[id].y >= domain.y_max) { elements[id].y = elements[id].y - (domain.y_max-domain.y_min); } else if (elements[id].y <= domain.y_min) { elements[id].y = (domain.y_max-domain.y_min) + elements[id].y; } elements[id].z = elements[id].z + (delta_t * elements[id].vz) + ( (elements[id].force_z * delta_t * delta_t)/(2*elements[id].m)); if( elements[id].z >= domain.z_max ) { elements[id].z= elements[id].z - (domain.z_max-domain.z_min); } else if (elements[id].z <= domain.z_min) { elements[id].z = (domain.z_max-domain.z_min) + elements[id].z; } id = id + blockDim.x * gridDim.x; } } //************************************** VTK and OUT file write ************************************************** template <typename T> void vtk (particle<T> *elements, int num_par) { string vtk_name = part_out_name_base+"/Particle_parallel2_/"+vtk_out_name_base + "_" + to_string(vtk_count) + ".vtk"; fstream file; string type = typeid(elements[1].m).name(); if(type == "d") type = "double"; else if(type == "f") type = "float"; file.open(vtk_name,ios::out); file << "# vtk DataFile Version 4.0" <<"\n" << "hesp visualization file" << "\n" << "ASCII" << "\n" << "DATASET UNSTRUCTURED_GRID" << "\n" << "POINTS "<< num_par<<" "<< type << "\n"; for( int i = 0; i< num_par; ++i) { file <<fixed<<setprecision(6)<< elements[i].x <<" "<<fixed<<setprecision(6)<< elements[i].y <<" "<<fixed<<setprecision(6)<< elements[i].z <<"\n"; } file << "CELLS 0 0" << "\n" << "CELL_TYPES 0" << "\n" << "POINT_DATA " <<num_par<< "\n" << "SCALARS m "<< type << "\n" << "LOOKUP_TABLE default" << "\n"; for( int i = 0; i< num_par; ++i) { file <<fixed<<setprecision(6)<< elements[i].m <<"\n"; } file << "VECTORS v "<< type <<"\n"; for( int i = 0; i< num_par; ++i) { file<<fixed<<setprecision(6) << elements[i].vx <<" "<<fixed<<setprecision(6)<< elements[i].vy <<" "<<fixed<<setprecision(6)<< elements[i].vz <<"\n"; } file.close(); vtk_count++; } template <typename T> void out ( particle<T> *elements, int num_par) { string out_name = part_out_name_base+"/Particle_parallel2_/"+part_out_name_base + "_" + to_string(out_count) + ".out"; fstream file; file.open(out_name,ios::out); file << num_par <<"\n"; for(int i = 0; i< num_par; ++i) { file<<fixed<<setprecision(6)<< elements[i].m << " "<<fixed<<setprecision(6)<< elements[i].x <<" "<<fixed<<setprecision(6)<< elements[i].y <<" "<<fixed<<setprecision(6)<< elements[i].z <<" "<<fixed<<setprecision(6)<< elements[i].vx<<" "<<fixed<<setprecision(6)<< elements[i].vy << " "<<fixed<<setprecision(6)<< elements[i].vz<< endl; } file.close(); out_count++; } //************************************** Main ************************************************** int main( int argc, char *argv[] ) { data_type t = 0; int 
N,i=0,count = 0, *d_cell_arr, *cpu_par, *gpu_par; double negative_ratio = 1, positiv_ratio = 1; bool file_val = true; string::size_type sz; string testline, name; input_para(argv[1]); tuple<int,int> cpu_gpu_count; int cpu_count = 0; int gpu_count = 0; assert((domain.x_max-domain.x_min)/domain.x_n >= domain.r_cut && (domain.y_max-domain.y_min)/domain.y_n >= domain.r_cut && (domain.z_max-domain.z_min)/domain.z_n >= domain.r_cut); ifstream Input (part_input_file); getline ( Input, testline); N = atoi(testline.c_str()) ; particle<data_type> *elements; checkError(hipMallocManaged(&elements,N*sizeof(particle<data_type>))); // create object for particle structure in device checkError(hipMallocManaged((void**)&cpu_par,N*sizeof(int))); checkError(hipMallocManaged((void**)&gpu_par,N*sizeof(int))); //************************************** Read input values ************************************************** while( !Input.eof() && file_val ) { file_val = getline ( Input, testline,' '); if(file_val == false ) break; elements[i].m = stod(testline, &sz) ; getline ( Input, testline,' '); elements[i].x = stod(testline, &sz) ; if( elements[i].x > domain.x_max ) { elements[i].x = elements[i].x - (domain.x_max-domain.x_min); } else if (elements[i].x < domain.x_min) { elements[i].x = (domain.x_max-domain.x_min) + elements[i].x; } getline ( Input, testline,' '); elements[i].y = stod(testline, &sz) ; if( elements[i].y > domain.y_max) { elements[i].y = elements[i].y - (domain.y_max-domain.y_min); } else if (elements[i].y < domain.y_min) { elements[i].y = (domain.y_max-domain.y_min) + elements[i].y; } getline ( Input, testline,' '); elements[i].z = stod(testline, &sz) ; if( elements[i].z > domain.z_max ) { elements[i].z = elements[i].z - (domain.z_max-domain.z_min); } else if (elements[i].z < domain.z_min) { elements[i].z = (domain.z_max-domain.z_min) + elements[i].z; } getline ( Input, testline,' '); elements[i].vx = stod(testline, &sz) ; getline ( Input, testline,' '); elements[i].vy = stod(testline, &sz) ; getline ( Input, testline,'\n'); elements[i].vz = stod(testline, &sz) ; elements[i].force_x = 0; elements[i].force_y = 0; elements[i].force_z = 0; elements[i].cell_num = i; cpu_par[i] = -1; gpu_par[i] = -1; i++; } domain.block_size = N/domain.thread_size +1; auto start = std::chrono::system_clock::now(); checkError(hipMallocManaged((void**)&d_cell_arr,domain.cell_size*sizeof(int))); // // cell array initializatio in device hipLaunchKernelGGL(( init_cell<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, domain, d_cell_arr); // initializing cell array with -1 cpu_gpu_count = init_cpu_gpu<data_type>(elements,domain,N,cpu_par,gpu_par); cpu_count = get<0>(cpu_gpu_count); gpu_count = get<1>(cpu_gpu_count); hipLaunchKernelGGL(( update_cell<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,N,domain, d_cell_arr); // update cell array with particle pisition checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); hipLaunchKernelGGL(( init_force<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,epsilon,sigma, N, domain, d_cell_arr); // calculate initial force checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); string result_name = part_out_name_base+"/Particle_parallel2_/result_time.txt"; fstream file1; file1.open(result_name,std::ios_base::app); while(count <= int(std::round(time_end/timestep_length))) { // ************************ write out vtk and out ********************** /* if((count%int(vtk_out_freq)) == 0 ) { 
vtk<data_type>(&elements[0],N); } if( (count%int(part_out_freq)) == 0 ) { out<data_type>(&elements[0],N); }*/ // ************************ Calculate distance ********************** /*int device = -1; checkError(hipGetDevice(&device)); checkError(hipMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL)); checkError(hipMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL)); //checkError(hipDeviceSynchronize());*/ auto start_dist = std::chrono::system_clock::now(); hipLaunchKernelGGL(( main_kernel_dist<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); hipLaunchKernelGGL(( init_particle<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,N); // CHANGE hipLaunchKernelGGL(( init_cell<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, domain, d_cell_arr); checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); auto end_dist = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_dist = end_dist-start_dist; auto start_up = std::chrono::system_clock::now(); hipLaunchKernelGGL(( update_cell<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,N,domain, d_cell_arr); // update cell array with particle pisition checkError(hipPeekAtLastError()); checkError(hipDeviceSynchronize()); auto end_up = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_up = end_up-start_up; /* checkError(hipGetDevice(&device)); checkError(hipMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL)); checkError(hipMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL));*/ //checkError(hipDeviceSynchronize()); auto start_cpu_for = std::chrono::system_clock::now(); cpu_gpu_count = init_cpu_gpu<data_type>(elements,domain,N,cpu_par,gpu_par); cpu_count = get<0>(cpu_gpu_count); gpu_count = get<1>(cpu_gpu_count); auto end_cpu_for = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_cpu_for = end_cpu_for-start_cpu_for; // ************************ calculate force ********************** /* checkError(hipGetDevice(&device)); checkError(hipMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL)); checkError(hipMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL)); checkError(hipDeviceSynchronize());*/ hipEvent_t start, stop; checkError(hipEventCreate(&start)); checkError(hipEventCreate(&stop)); checkError(hipEventRecord(start)); domain.block_size = gpu_count/domain.thread_size +1; hipLaunchKernelGGL(( main_kernel_force<data_type>), dim3(domain.block_size),dim3(domain.thread_size), 0, 0, elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr, gpu_par, gpu_count); checkError(hipEventRecord(stop)); /* checkError(hipGetDevice(&device)); checkError(hipMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL)); checkError(hipMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL)); checkError(hipDeviceSynchronize());*/ auto start_cpu = std::chrono::system_clock::now(); main_host_force<data_type>(elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr, cpu_par, cpu_count); auto end_cpu = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds_cpu = end_cpu-start_cpu; checkError(hipEventSynchronize(stop)); checkError(hipPeekAtLastError()); 
//checkError(hipDeviceSynchronize()); float milliseconds = 0; checkError(hipEventElapsedTime(&milliseconds, start, stop)); float time_ratio = (milliseconds/(elapsed_seconds_cpu.count()*1000)); if(time_ratio<1) { negative_ratio = 1; positiv_ratio = positiv_ratio *1.2; domain.cpu_x = int(domain.cpu_x/positiv_ratio); if( domain.cpu_x == 0) domain.cpu_x = 1; } else if(time_ratio>1.5) { if( domain.cpu_x == 0) domain.cpu_x = 1; positiv_ratio = 1; negative_ratio = negative_ratio*1.2; domain.cpu_x = int(domain.cpu_x*negative_ratio); //if(domain.cpu_x > domain.x_n-1) domain.cpu_x = domain.x_n-1; } else { positiv_ratio = 1; negative_ratio = 1; } domain.block_size = N/domain.thread_size +1; //if(count<500) //cout<<count<<"\tGpu: "<<milliseconds<<"\tCPU :"<<elapsed_seconds_cpu.count()*1000<<"\tCPU_for :"<<elapsed_seconds_cpu_for.count()*1000<< "\tdist :"<<elapsed_seconds_dist.count()*1000<<"\tup :"<<elapsed_seconds_up.count()*1000<<"\tcpu_count:"<<cpu_count<<"\tGPU_count:"<<gpu_count<< "\t cpu_x :"<<domain.cpu_x<<endl; file1<<count<<"\tGpu: "<<milliseconds<<"\tCPU :"<<elapsed_seconds_cpu.count()*1000<<"\tCPU_for :"<<elapsed_seconds_cpu_for.count()*1000<< "\tdist :"<<elapsed_seconds_dist.count()*1000<<"\tup :"<<elapsed_seconds_up.count()*1000<< "\t cpu_x :"<<domain.cpu_x<<endl; t = t+timestep_length; count++; } /*for(int i =0; i<N; ++i) { cout<<i<<" "<<cpu_par[i]<<"\t"<<gpu_par[i]<<"\t"; if(i%5 == 0) cout<<endl; }*/ auto end = std::chrono::system_clock::now(); std::chrono::duration<double> elapsed_seconds = end-start; std::cout << "elapsed time Unified " << elapsed_seconds.count() << "s\n"; file1<<"elapsed time Unified " << elapsed_seconds.count() << "s\n"; string vtk_name = part_out_name_base+"/Results"+ ".txt"; fstream file; file.open(vtk_name,std::ios_base::app); file<<"Unified Memory Variable_Block :"<<elapsed_seconds.count() << "\t"<<domain.cpu_x<<"\n"; file.close(); file1.close(); }
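//--------------------------------------------------------------------------
// Editorial restatement of the adaptive load balancer in the time loop above
// (hypothetical free function; the original inlines this logic). Each step,
// the GPU-to-CPU time ratio nudges cpu_x: a ratio below 1 means the GPU
// finished first, so the CPU slab shrinks; a ratio above 1.5 grows it; the
// dead band in between leaves the split alone to avoid oscillation.
static int rebalance_cpu_x(int cpu_x, float time_ratio,
                           double &positiv_ratio, double &negative_ratio)
{
  if (time_ratio < 1.0f) {            // GPU finished first: shrink the CPU share
    negative_ratio = 1;
    positiv_ratio *= 1.2;
    cpu_x = int(cpu_x / positiv_ratio);
    if (cpu_x == 0) cpu_x = 1;
  } else if (time_ratio > 1.5f) {     // CPU finished well ahead: grow the CPU share
    if (cpu_x == 0) cpu_x = 1;
    positiv_ratio = 1;
    negative_ratio *= 1.2;
    cpu_x = int(cpu_x * negative_ratio);
  } else {                            // within the dead band: keep the split
    positiv_ratio = 1;
    negative_ratio = 1;
  }
  return cpu_x;
}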
3413f9daffb07de256d6d885518cc767f769d806.cu
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <cmath>
#include <istream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cassert>
#include <sys/stat.h>
#include <chrono>
#include <ctime>
#include <typeinfo>
#include <thrust/tuple.h>
#include <omp.h>
#include <tuple>

#define data_type double

using namespace std;

string part_input_file, part_out_name_base, vtk_out_name_base;
data_type timestep_length, time_end, epsilon, sigma, part_out_freq, vtk_out_freq, cl_workgroup_1dsize;
unsigned out_count = 0, vtk_count = 0, x_n, y_n, z_n, cell_size;

void checkError(cudaError_t err)
{
  if (err != cudaSuccess) {
    std::cout << cudaGetErrorString(err) << std::endl;
    exit(-1);
  }
}

//************************************** Structs for variables **************************************************
template <typename T>
struct particle {
  T x; T y; T z;
  T vx; T vy; T vz;
  T m;
  T force_x; T force_y; T force_z;
  int cell_num;
};

template <typename T>
struct Domain {
  T x_min; T x_max;
  T y_min; T y_max;
  T z_min; T z_max;
  unsigned int x_n; unsigned int y_n; unsigned int z_n;
  T r_cut;
  T x_len; T y_len; T z_len;
  int cell_size;
  int thread_size;
  int block_size;
  int cpu_x = 20;
  T max_vel = 48;  // default value; either read from the par file or this value is used
};

Domain<data_type> domain;

//************************************** Input Parameters from Par **************************************************
void input_para(string name)
{
  string testline;
  string::size_type sz;
  ifstream Input(name);
  if (!Input) { cout << "There was a problem opening the file. Press any key to close.\n"; }
  while (Input.good()) {
    getline(Input, testline, ' ');
    if (testline != "") {
      if (testline == "part_input_file") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        part_input_file = testline;
      }
      else if (testline == "timestep_length") { getline(Input, testline); timestep_length = stof(testline, &sz); }
      else if (testline == "time_end")        { getline(Input, testline); time_end = stof(testline, &sz); }
      else if (testline == "epsilon")         { getline(Input, testline); epsilon = stof(testline, &sz); }
      else if (testline == "sigma")           { getline(Input, testline); sigma = stof(testline, &sz); }
      else if (testline == "part_out_freq")   { getline(Input, testline); part_out_freq = stof(testline, &sz); }
      else if (testline == "part_out_name_base") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        part_out_name_base = testline;
      }
      else if (testline == "vtk_out_freq")    { getline(Input, testline); vtk_out_freq = stof(testline, &sz); }
      else if (testline == "vtk_out_name_base") {
        getline(Input >> ws, testline);
        if (isalpha(testline[testline.size() - 1]) == false) testline.pop_back();
        vtk_out_name_base = testline;
      }
      else if (testline == "cl_workgroup_1dsize")   { getline(Input, testline); domain.thread_size = stoi(testline); }
      else if (testline == "cl_workgroup_3dsize_x") { getline(Input, testline); }  // read and ignored
      else if (testline == "cl_workgroup_3dsize_y") { getline(Input, testline); }
      else if (testline == "cl_workgroup_3dsize_z") { getline(Input, testline); }
      else if (testline == "x_min")   { getline(Input, testline); domain.x_min = stof(testline, &sz); }
      else if (testline == "y_min")   { getline(Input, testline); domain.y_min = stof(testline, &sz); }
      else if (testline == "z_min")   { getline(Input, testline); domain.z_min = stof(testline, &sz); }
      else if (testline == "x_max")   { getline(Input, testline); domain.x_max = stof(testline, &sz); }
      else if (testline == "y_max")   { getline(Input, testline); domain.y_max = stof(testline, &sz); }
      else if (testline == "z_max")   { getline(Input, testline); domain.z_max = stof(testline, &sz); }
      else if (testline == "r_cut")   { getline(Input, testline); domain.r_cut = stof(testline, &sz); }
      else if (testline == "x_n")     { getline(Input, testline); domain.x_n = stoul(testline, nullptr, 0); }
      else if (testline == "y_n")     { getline(Input, testline); domain.y_n = stoul(testline, nullptr, 0); }
      else if (testline == "z_n")     { getline(Input, testline); domain.z_n = stoul(testline, nullptr, 0); }
      else if (testline == "max_vel") { getline(Input, testline); domain.max_vel = stof(testline, &sz); }
      else if (testline == "cpu_x")   { getline(Input, testline); domain.cpu_x = stof(testline, &sz); }
    }
  }

  domain.x_len = (domain.x_max - domain.x_min)/domain.x_n;
  domain.y_len = (domain.y_max - domain.y_min)/domain.y_n;
  domain.z_len = (domain.z_max - domain.z_min)/domain.z_n;
  domain.cell_size = ((domain.x_max - domain.x_min)*(domain.y_max - domain.y_min)*(domain.z_max - domain.z_min))/(domain.x_len*domain.y_len*domain.z_len);
  domain.cpu_x = int(domain.x_n*domain.cpu_x/100);

  /*cout<<"part_input_file : "<<part_input_file<<"\t"<<part_input_file.size()<<endl;
  cout<<"timestep_length : "<<timestep_length<<endl;
  cout<<"time_end : "<<time_end<<endl;
  cout<<"epsilon : "<<epsilon<<endl;
  cout<<"sigma : "<<sigma<<endl;
  cout<<"part_out_freq : "<<part_out_freq<<endl;
  cout<<"part_out_name_base : "<<part_out_name_base<<"\t"<<part_out_name_base.size()<<endl;
  cout<<"vtk_out_freq : "<<vtk_out_freq<<endl;
  cout<<"vtk_out_name_base : "<<vtk_out_name_base<<"\t"<<vtk_out_name_base.size()<<endl;
  cout<<"x_min: "<<domain.x_min<<endl;
  cout<<"y_min: "<<domain.y_min<<endl;
  cout<<"z_min: "<<domain.z_min<<endl;
  cout<<"x_max: "<<domain.x_max<<endl;
  cout<<"y_max: "<<domain.y_max<<endl;
  cout<<"z_max: "<<domain.z_max<<endl;
  cout<<"x_n: "<<domain.x_n<<endl;
  cout<<"y_n: "<<domain.y_n<<endl;
  cout<<"z_n: "<<domain.z_n<<endl;
  cout<<"r_cut: "<<domain.r_cut<<endl;
  cout<<"max_vel: "<<domain.max_vel<<endl;
  cout<<"cpu_x: "<<domain.cpu_x<<endl;*/

  struct stat st;
  if (stat(&part_out_name_base[0], &st) != 0) {
    const int dir_err = mkdir(&part_out_name_base[0], S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    if (-1 == dir_err) { printf("Error creating directory!\n"); exit(1); }
  }
  string subName = part_out_name_base + "/Particle_parallel2_";
  if (stat(&subName[0], &st) != 0) {
    const int dir_err = mkdir(&subName[0], S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
    if (-1 == dir_err) { printf("Error creating directory!\n"); exit(1); }
  }
}

//************************************** Initializing Particles **************************************************
template <typename T>
__global__ void init_particle(particle<T> *elements, int num_par)
{
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  while (id < num_par) {
    elements[id].cell_num = id;
    id = id + blockDim.x * gridDim.x;
  }
}

template <typename T>
__host__ tuple<int,int> init_cpu_gpu(particle<T> *elements, Domain<T> domain, int num_par, int *cpu_par, int *gpu_par)
{
  int cpu_count = 0, gpu_count = 0;
  for (int i = 0; i < num_par; ++i) {
    int x_cell = (elements[i].x - domain.x_min)/domain.x_len;
    cpu_par[i] = -1;
    gpu_par[i] = -1;
    if (x_cell > domain.cpu_x) { gpu_par[gpu_count] = i; gpu_count++; }
    else                       { cpu_par[cpu_count] = i; cpu_count++; }
  }
  return make_tuple(cpu_count, gpu_count);
}

//************************************** Initializing & updating cell
************************************************** template <typename T> __global__ void init_cell (Domain<T> domain, int *cell_arr) { int id = threadIdx.x + blockIdx.x * blockDim.x; while(id<domain.cell_size) { cell_arr[id] = -1; id = id + blockDim.x * gridDim.x; } } template <typename T> __device__ void update_cell_calc (particle<T> *elements, int id, Domain<T> domain, int *cell_arr) { int x_cell = (elements[id].x-domain.x_min)/domain.x_len; int y_cell = (elements[id].y-domain.y_min)/domain.y_len; int z_cell = (elements[id].z-domain.z_min)/domain.z_len; int cell_count = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n; elements[id].cell_num = atomicExch(&cell_arr[cell_count], elements[id].cell_num); } template <typename T> __global__ void update_cell (particle<T> *elements, int num_par, Domain<T> domain, int *cell_arr) { int id = threadIdx.x + blockIdx.x * blockDim.x; while(id<num_par) { update_cell_calc<T>(elements, id, domain, cell_arr); id = id + blockDim.x * gridDim.x; } } //************************************** Force Calculation using Lenards ************************************************** template <typename T> __device__ thrust::tuple<T,T,T> force ( T dist_x1, T dist_y1, T dist_z1, particle<T> *elements, T epsilon, T sigma, int id , int curr_pos, int coord, Domain<T> domain, int *cell_arr) { T result_x = 0,result_y = 0,result_z = 0; T x,y,z, mag; int temp = id; int curr_z = temp/(domain.x_n*domain.y_n); temp -= curr_z*domain.x_n*domain.y_n; int curr_y = temp/domain.x_n; int curr_x = temp % domain.x_n; for(int p = curr_x-1; p<curr_x+2; ++p) { for(int q = curr_y-1; q<curr_y+2; ++q) { for(int r = curr_z-1; r<curr_z+2; ++r) { if ( p >= 0 && p < domain.x_n && q >= 0 && q < domain.y_n && r >= 0 && r < domain.z_n) { int cell_index = p + q*domain.y_n + r*domain.y_n*domain.x_n; int i = cell_arr[cell_index]; while (i != -1) { if (i != curr_pos) { if( dist_x1 < elements[i].x) { if( abs(dist_x1 - elements[i].x) <= (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x))) x = dist_x1 - elements[i].x ; else x = (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x)); } else { if(abs(elements[i].x - dist_x1) <= (abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min))) x = dist_x1 - elements[i].x ; else x = -(abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min)); } if( dist_y1 < elements[i].y) { if( abs(dist_y1 - elements[i].y) <= (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y))) y = dist_y1 - elements[i].y ; else y = (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y)); } else { if(abs(elements[i].y - dist_y1) <= (abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min))) y = dist_y1 - elements[i].y ; else y = -(abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min)); } if( dist_z1 < elements[i].z) { if( abs(dist_z1 - elements[i].z) <= (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z))) z = dist_z1 - elements[i].z ; else z = (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z)); } else { if(abs(elements[i].z - dist_z1) <= (abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min))) z = dist_z1 - elements[i].z ; else z = -(abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min)); } // magnitude of the distance vector mag = sqrt(pow(x,2)+ pow(y,2) + pow(z,2)); if(mag<=domain.r_cut) { result_x= result_x + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * x )); result_y= result_y + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * 
(pow((sigma/mag),6)) -1 ) * y ));
                  result_z = result_z + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * z ));
                }
              }
              i = elements[i].cell_num;
            }
          }
        }
      }
    }
  return thrust::make_tuple(result_x, result_y, result_z);
}

//************************************** Initialization of Forces **************************************************
template <typename T>
__global__ void init_force ( particle<T> *elements, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr)
{
  int curr_pos = threadIdx.x + blockIdx.x * blockDim.x;
  while( curr_pos < num_par)
  {
    thrust::tuple<T,T,T> result;
    int x_cell = (elements[curr_pos].x-domain.x_min)/domain.x_len;
    int y_cell = (elements[curr_pos].y-domain.y_min)/domain.y_len;
    int z_cell = (elements[curr_pos].z-domain.z_min)/domain.z_len;
    int id = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n;
    result = force<T> (elements[curr_pos].x, elements[curr_pos].y, elements[curr_pos].z, elements, epsilon, sigma, id, curr_pos, 1, domain, cell_arr);
    elements[curr_pos].force_x = thrust::get<0>(result);
    elements[curr_pos].force_y = thrust::get<1>(result);
    elements[curr_pos].force_z = thrust::get<2>(result);
    curr_pos = curr_pos + blockDim.x * gridDim.x;
  }
}

//************************************** Kernel to calculate force **************************************************
template <typename T>
__global__ void main_kernel_force (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr, int *gpu_par, int gpu_count )
{
  int idg = threadIdx.x + blockIdx.x * blockDim.x;
  T force_x_o, force_y_o, force_z_o;
  while( idg < gpu_count)
  {
    int id = gpu_par[idg];
    if(id != -1)
    {
      int x_cell = (elements[id].x-domain.x_min)/domain.x_len;
      int y_cell = (elements[id].y-domain.y_min)/domain.y_len;
      int z_cell = (elements[id].z-domain.z_min)/domain.z_len;
      int idc = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n;
      thrust::tuple<T,T,T> result;
      force_x_o = elements[id].force_x;
      force_y_o = elements[id].force_y;
      force_z_o = elements[id].force_z;
      result = force<T> (elements[id].x, elements[id].y, elements[id].z, elements, epsilon, sigma, idc, id, 1, domain, cell_arr); // Shooting
      elements[id].force_x = thrust::get<0>(result);
      elements[id].force_y = thrust::get<1>(result);
      elements[id].force_z = thrust::get<2>(result);
      elements[id].vx = elements[id].vx + ((force_x_o+elements[id].force_x)*delta_t/(2*elements[id].m));
      if(abs(elements[id].vx)>domain.max_vel)
        elements[id].vx = domain.max_vel*(elements[id].vx/abs(elements[id].vx));
      elements[id].vy = elements[id].vy + ((force_y_o+elements[id].force_y)*delta_t/(2*elements[id].m));
      if(abs(elements[id].vy)>domain.max_vel)
        elements[id].vy = domain.max_vel*(elements[id].vy/abs(elements[id].vy));
      elements[id].vz = elements[id].vz + ((force_z_o+elements[id].force_z)*delta_t/(2*elements[id].m));
      if(abs(elements[id].vz)>domain.max_vel)
        elements[id].vz = domain.max_vel*(elements[id].vz/abs(elements[id].vz));
    }
    idg = idg + blockDim.x * gridDim.x;
  }
}

//************************************** Host to calculate force **************************************************
template <typename T>
__host__ void main_host_force (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr, int *cpu_par, int cpu_count )
{
  #pragma omp parallel for
  for(int k=0 ; k < cpu_count; ++k)
  {
    int j = cpu_par[k];
    if(j != -1)
    {
      T force_x_o, force_y_o, force_z_o;
      int x_cell = (elements[j].x-domain.x_min)/domain.x_len;
      int y_cell = (elements[j].y-domain.y_min)/domain.y_len;
      int z_cell = (elements[j].z-domain.z_min)/domain.z_len;
      int idc = x_cell + y_cell*domain.x_n + z_cell*domain.x_n*domain.y_n;
      force_x_o = elements[j].force_x;
      force_y_o = elements[j].force_y;
      force_z_o = elements[j].force_z;
      T result_x = 0;
      T result_y = 0;
      T result_z = 0;
      T x,y,z, mag;
      T dist_x1 = elements[j].x;
      T dist_y1 = elements[j].y;
      T dist_z1 = elements[j].z;
      int temp = idc;
      int curr_z = temp/(domain.x_n*domain.y_n);
      temp -= curr_z*domain.x_n*domain.y_n;
      int curr_y = temp/domain.x_n;
      int curr_x = temp % domain.x_n;
      for(int p = curr_x-1; p<curr_x+2; ++p)
      {
        for(int q = curr_y-1; q<curr_y+2; ++q)
        {
          for(int r = curr_z-1; r<curr_z+2; ++r)
          {
            if ( p >= 0 && p < domain.x_n && q >= 0 && q < domain.y_n && r >= 0 && r < domain.z_n)
            {
              int cell_index = p + q*domain.y_n + r*domain.y_n*domain.x_n;
              int i = cell_arr[cell_index];
              while (i != -1)
              {
                if (i != j)
                {
                  if( dist_x1 < elements[i].x)
                  {
                    if( abs(dist_x1 - elements[i].x) <= (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x)))
                      x = dist_x1 - elements[i].x ;
                    else
                      x = (abs(dist_x1 - domain.x_min) + abs(domain.x_max-elements[i].x));
                  }
                  else
                  {
                    if(abs(elements[i].x - dist_x1) <= (abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min)))
                      x = dist_x1 - elements[i].x ;
                    else
                      x = -(abs(domain.x_max - dist_x1) + abs(elements[i].x-domain.x_min));
                  }
                  if( dist_y1 < elements[i].y)
                  {
                    if( abs(dist_y1 - elements[i].y) <= (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y)))
                      y = dist_y1 - elements[i].y ;
                    else
                      y = (abs(dist_y1 - domain.y_min) + abs(domain.y_max-elements[i].y));
                  }
                  else
                  {
                    if(abs(elements[i].y - dist_y1) <= (abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min)))
                      y = dist_y1 - elements[i].y ;
                    else
                      y = -(abs(domain.y_max - dist_y1) + abs(elements[i].y-domain.y_min));
                  }
                  if( dist_z1 < elements[i].z)
                  {
                    if( abs(dist_z1 - elements[i].z) <= (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z)))
                      z = dist_z1 - elements[i].z ;
                    else
                      z = (abs(dist_z1 - domain.z_min) + abs(domain.z_max-elements[i].z));
                  }
                  else
                  {
                    if(abs(elements[i].z - dist_z1) <= (abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min)))
                      z = dist_z1 - elements[i].z ;
                    else
                      z = -(abs(domain.z_max - dist_z1) + abs(elements[i].z-domain.z_min));
                  }
                  // magnitude of the distance vector
                  mag = sqrt(pow(x,2)+ pow(y,2) + pow(z,2));
                  if(mag<=domain.r_cut)
                  {
                    result_x = result_x + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * x ));
                    result_y = result_y + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * y ));
                    result_z = result_z + (((24 * epsilon/ (mag * mag)) * (pow((sigma/mag),6)) * (2 * (pow((sigma/mag),6)) -1 ) * z ));
                  }
                }
                i = elements[i].cell_num;
              }
            }
          }
        }
      }
      elements[j].force_x = result_x;
      elements[j].force_y = result_y;
      elements[j].force_z = result_z;
      elements[j].vx = elements[j].vx + ((force_x_o+elements[j].force_x)*delta_t/(2*elements[j].m));
      if(abs(elements[j].vx)>domain.max_vel)
        elements[j].vx = domain.max_vel*(elements[j].vx/abs(elements[j].vx));
      elements[j].vy = elements[j].vy + ((force_y_o+elements[j].force_y)*delta_t/(2*elements[j].m));
      if(abs(elements[j].vy)>domain.max_vel)
        elements[j].vy = domain.max_vel*(elements[j].vy/abs(elements[j].vy));
      elements[j].vz = elements[j].vz + ((force_z_o+elements[j].force_z)*delta_t/(2*elements[j].m));
      if(abs(elements[j].vz)>domain.max_vel)
        elements[j].vz = domain.max_vel*(elements[j].vz/abs(elements[j].vz));
    }
  }
}

//************************************** Kernel to calculate distance **************************************************
template <typename T>
__global__ void main_kernel_dist (particle<T> *elements, T delta_t, T epsilon, T sigma, int num_par, Domain<T> domain, int * cell_arr )
{
  int id = threadIdx.x + blockIdx.x * blockDim.x;
  while( id < num_par)
  {
    elements[id].x = elements[id].x + (delta_t * elements[id].vx) + ( (elements[id].force_x * delta_t * delta_t)/(2*elements[id].m));
    if( elements[id].x >= domain.x_max )
    {
      elements[id].x = elements[id].x - (domain.x_max-domain.x_min);
    }
    else if (elements[id].x <= domain.x_min)
    {
      elements[id].x = (domain.x_max-domain.x_min) + elements[id].x;
    }
    elements[id].y = elements[id].y + (delta_t * elements[id].vy) + ( (elements[id].force_y * delta_t * delta_t)/(2*elements[id].m));
    if( elements[id].y >= domain.y_max)
    {
      elements[id].y = elements[id].y - (domain.y_max-domain.y_min);
    }
    else if (elements[id].y <= domain.y_min)
    {
      elements[id].y = (domain.y_max-domain.y_min) + elements[id].y;
    }
    elements[id].z = elements[id].z + (delta_t * elements[id].vz) + ( (elements[id].force_z * delta_t * delta_t)/(2*elements[id].m));
    if( elements[id].z >= domain.z_max )
    {
      elements[id].z = elements[id].z - (domain.z_max-domain.z_min);
    }
    else if (elements[id].z <= domain.z_min)
    {
      elements[id].z = (domain.z_max-domain.z_min) + elements[id].z;
    }
    id = id + blockDim.x * gridDim.x;
  }
}

//************************************** VTK and OUT file write **************************************************
template <typename T>
void vtk (particle<T> *elements, int num_par)
{
  string vtk_name = part_out_name_base+"/Particle_parallel2_/"+vtk_out_name_base + "_" + to_string(vtk_count) + ".vtk";
  fstream file;
  string type = typeid(elements[1].m).name();
  if(type == "d")
    type = "double";
  else if(type == "f")
    type = "float";
  file.open(vtk_name,ios::out);
  file << "# vtk DataFile Version 4.0" <<"\n" << "hesp visualization file" << "\n" << "ASCII" << "\n" << "DATASET UNSTRUCTURED_GRID" << "\n" << "POINTS "<< num_par<<" "<< type << "\n";
  for( int i = 0; i< num_par; ++i)
  {
    file <<fixed<<setprecision(6)<< elements[i].x <<" "<<fixed<<setprecision(6)<< elements[i].y <<" "<<fixed<<setprecision(6)<< elements[i].z <<"\n";
  }
  file << "CELLS 0 0" << "\n" << "CELL_TYPES 0" << "\n" << "POINT_DATA " <<num_par<< "\n" << "SCALARS m "<< type << "\n" << "LOOKUP_TABLE default" << "\n";
  for( int i = 0; i< num_par; ++i)
  {
    file <<fixed<<setprecision(6)<< elements[i].m <<"\n";
  }
  file << "VECTORS v "<< type <<"\n";
  for( int i = 0; i< num_par; ++i)
  {
    file<<fixed<<setprecision(6) << elements[i].vx <<" "<<fixed<<setprecision(6)<< elements[i].vy <<" "<<fixed<<setprecision(6)<< elements[i].vz <<"\n";
  }
  file.close();
  vtk_count++;
}

template <typename T>
void out ( particle<T> *elements, int num_par)
{
  string out_name = part_out_name_base+"/Particle_parallel2_/"+part_out_name_base + "_" + to_string(out_count) + ".out";
  fstream file;
  file.open(out_name,ios::out);
  file << num_par <<"\n";
  for(int i = 0; i< num_par; ++i)
  {
    file<<fixed<<setprecision(6)<< elements[i].m << " "<<fixed<<setprecision(6)<< elements[i].x <<" "<<fixed<<setprecision(6)<< elements[i].y <<" "<<fixed<<setprecision(6)<< elements[i].z <<" "<<fixed<<setprecision(6)<< elements[i].vx<<" "<<fixed<<setprecision(6)<< elements[i].vy << " "<<fixed<<setprecision(6)<< elements[i].vz<< endl;
  }
  file.close();
  out_count++;
}

//************************************** Main **************************************************
int main( int argc, char *argv[] )
{
  data_type t = 0;
  int N,i=0,count = 0, *d_cell_arr, *cpu_par, *gpu_par;
  double negative_ratio = 1, positiv_ratio = 1;
  bool file_val = true;
  string::size_type sz;
  string testline, name;
  input_para(argv[1]);
  tuple<int,int> cpu_gpu_count;
  int cpu_count = 0;
  int gpu_count = 0;
  assert((domain.x_max-domain.x_min)/domain.x_n >= domain.r_cut && (domain.y_max-domain.y_min)/domain.y_n >= domain.r_cut && (domain.z_max-domain.z_min)/domain.z_n >= domain.r_cut);
  ifstream Input (part_input_file);
  getline ( Input, testline);
  N = atoi(testline.c_str()) ;
  particle<data_type> *elements;
  checkError(cudaMallocManaged(&elements,N*sizeof(particle<data_type>)));   // create object for particle structure in device
  checkError(cudaMallocManaged((void**)&cpu_par,N*sizeof(int)));
  checkError(cudaMallocManaged((void**)&gpu_par,N*sizeof(int)));

  //************************************** Read input values **************************************************
  while( !Input.eof() && file_val )
  {
    file_val = getline ( Input, testline,' ');
    if(file_val == false )
      break;
    elements[i].m = stod(testline, &sz) ;
    getline ( Input, testline,' ');
    elements[i].x = stod(testline, &sz) ;
    if( elements[i].x > domain.x_max )
    {
      elements[i].x = elements[i].x - (domain.x_max-domain.x_min);
    }
    else if (elements[i].x < domain.x_min)
    {
      elements[i].x = (domain.x_max-domain.x_min) + elements[i].x;
    }
    getline ( Input, testline,' ');
    elements[i].y = stod(testline, &sz) ;
    if( elements[i].y > domain.y_max)
    {
      elements[i].y = elements[i].y - (domain.y_max-domain.y_min);
    }
    else if (elements[i].y < domain.y_min)
    {
      elements[i].y = (domain.y_max-domain.y_min) + elements[i].y;
    }
    getline ( Input, testline,' ');
    elements[i].z = stod(testline, &sz) ;
    if( elements[i].z > domain.z_max )
    {
      elements[i].z = elements[i].z - (domain.z_max-domain.z_min);
    }
    else if (elements[i].z < domain.z_min)
    {
      elements[i].z = (domain.z_max-domain.z_min) + elements[i].z;
    }
    getline ( Input, testline,' ');
    elements[i].vx = stod(testline, &sz) ;
    getline ( Input, testline,' ');
    elements[i].vy = stod(testline, &sz) ;
    getline ( Input, testline,'\n');
    elements[i].vz = stod(testline, &sz) ;
    elements[i].force_x = 0;
    elements[i].force_y = 0;
    elements[i].force_z = 0;
    elements[i].cell_num = i;
    cpu_par[i] = -1;
    gpu_par[i] = -1;
    i++;
  }
  domain.block_size = N/domain.thread_size +1;
  auto start = std::chrono::system_clock::now();
  checkError(cudaMallocManaged((void**)&d_cell_arr,domain.cell_size*sizeof(int)));   // cell array initialization in device
  init_cell<data_type><<<domain.block_size,domain.thread_size>>>(domain, d_cell_arr);   // initializing cell array with -1
  cpu_gpu_count = init_cpu_gpu<data_type>(elements,domain,N,cpu_par,gpu_par);
  cpu_count = get<0>(cpu_gpu_count);
  gpu_count = get<1>(cpu_gpu_count);
  update_cell<data_type><<<domain.block_size,domain.thread_size>>>(elements,N,domain, d_cell_arr);   // update cell array with particle position
  checkError(cudaPeekAtLastError());
  checkError(cudaDeviceSynchronize());
  init_force<data_type><<<domain.block_size,domain.thread_size>>>(elements,epsilon,sigma, N, domain, d_cell_arr);   // calculate initial force
  checkError(cudaPeekAtLastError());
  checkError(cudaDeviceSynchronize());
  string result_name = part_out_name_base+"/Particle_parallel2_/result_time.txt";
  fstream file1;
  file1.open(result_name,std::ios_base::app);
  while(count <= int(std::round(time_end/timestep_length)))
  {
    // ************************ write out vtk and out **********************
    /* if((count%int(vtk_out_freq)) == 0 )
    {
      vtk<data_type>(&elements[0],N);
    }
    if( (count%int(part_out_freq)) == 0 )
    {
      out<data_type>(&elements[0],N);
    }*/

    // ************************ Calculate distance **********************
    /*int device = -1;
    checkError(cudaGetDevice(&device));
    checkError(cudaMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL));
    checkError(cudaMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL));
    //checkError(cudaDeviceSynchronize());*/
    auto start_dist = std::chrono::system_clock::now();
    main_kernel_dist<data_type><<<domain.block_size,domain.thread_size>>>(elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr);
    checkError(cudaPeekAtLastError());
    checkError(cudaDeviceSynchronize());
    init_particle<data_type><<<domain.block_size,domain.thread_size>>>(elements,N);   // CHANGE
    init_cell<data_type><<<domain.block_size,domain.thread_size>>>(domain, d_cell_arr);
    checkError(cudaPeekAtLastError());
    checkError(cudaDeviceSynchronize());
    auto end_dist = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds_dist = end_dist-start_dist;
    auto start_up = std::chrono::system_clock::now();
    update_cell<data_type><<<domain.block_size,domain.thread_size>>>(elements,N,domain, d_cell_arr);   // update cell array with particle position
    checkError(cudaPeekAtLastError());
    checkError(cudaDeviceSynchronize());
    auto end_up = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds_up = end_up-start_up;
    /* checkError(cudaGetDevice(&device));
    checkError(cudaMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL));
    checkError(cudaMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL));*/
    //checkError(cudaDeviceSynchronize());
    auto start_cpu_for = std::chrono::system_clock::now();
    cpu_gpu_count = init_cpu_gpu<data_type>(elements,domain,N,cpu_par,gpu_par);
    cpu_count = get<0>(cpu_gpu_count);
    gpu_count = get<1>(cpu_gpu_count);
    auto end_cpu_for = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds_cpu_for = end_cpu_for-start_cpu_for;

    // ************************ calculate force **********************
    /* checkError(cudaGetDevice(&device));
    checkError(cudaMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL));
    checkError(cudaMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL));
    checkError(cudaDeviceSynchronize());*/
    cudaEvent_t start, stop;
    checkError(cudaEventCreate(&start));
    checkError(cudaEventCreate(&stop));
    checkError(cudaEventRecord(start));
    domain.block_size = gpu_count/domain.thread_size +1;
    main_kernel_force<data_type><<<domain.block_size,domain.thread_size>>>(elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr, gpu_par, gpu_count);
    checkError(cudaEventRecord(stop));
    /* checkError(cudaGetDevice(&device));
    checkError(cudaMemPrefetchAsync(elements, N*sizeof(particle<data_type>), device, NULL));
    checkError(cudaMemPrefetchAsync(d_cell_arr, domain.cell_size*sizeof(int), device, NULL));
    checkError(cudaDeviceSynchronize());*/
    auto start_cpu = std::chrono::system_clock::now();
    main_host_force<data_type>(elements,timestep_length,epsilon,sigma,N,domain,d_cell_arr, cpu_par, cpu_count);
    auto end_cpu = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_seconds_cpu = end_cpu-start_cpu;
    checkError(cudaEventSynchronize(stop));
    checkError(cudaPeekAtLastError());
    //checkError(cudaDeviceSynchronize());
    float milliseconds = 0;
    checkError(cudaEventElapsedTime(&milliseconds, start, stop));
    float time_ratio = (milliseconds/(elapsed_seconds_cpu.count()*1000));
    if(time_ratio<1)
    {
      negative_ratio = 1;
      positiv_ratio = positiv_ratio *1.2;
      domain.cpu_x = int(domain.cpu_x/positiv_ratio);
      if( domain.cpu_x == 0)
        domain.cpu_x = 1;
    }
    else if(time_ratio>1.5)
    {
      if( domain.cpu_x == 0)
        domain.cpu_x = 1;
      positiv_ratio = 1;
      negative_ratio = negative_ratio*1.2;
      domain.cpu_x = int(domain.cpu_x*negative_ratio);
      //if(domain.cpu_x > domain.x_n-1) domain.cpu_x = domain.x_n-1;
    }
    else
    {
      positiv_ratio = 1;
      negative_ratio = 1;
    }
    domain.block_size = N/domain.thread_size +1;
    //if(count<500)
    //cout<<count<<"\tGpu: "<<milliseconds<<"\tCPU :"<<elapsed_seconds_cpu.count()*1000<<"\tCPU_for :"<<elapsed_seconds_cpu_for.count()*1000<< "\tdist :"<<elapsed_seconds_dist.count()*1000<<"\tup :"<<elapsed_seconds_up.count()*1000<<"\tcpu_count:"<<cpu_count<<"\tGPU_count:"<<gpu_count<< "\t cpu_x :"<<domain.cpu_x<<endl;
    file1<<count<<"\tGpu: "<<milliseconds<<"\tCPU :"<<elapsed_seconds_cpu.count()*1000<<"\tCPU_for :"<<elapsed_seconds_cpu_for.count()*1000<< "\tdist :"<<elapsed_seconds_dist.count()*1000<<"\tup :"<<elapsed_seconds_up.count()*1000<< "\t cpu_x :"<<domain.cpu_x<<endl;
    t = t+timestep_length;
    count++;
  }
  /*for(int i =0; i<N; ++i)
  {
    cout<<i<<" "<<cpu_par[i]<<"\t"<<gpu_par[i]<<"\t";
    if(i%5 == 0)
      cout<<endl;
  }*/
  auto end = std::chrono::system_clock::now();
  std::chrono::duration<double> elapsed_seconds = end-start;
  std::cout << "elapsed time Unified " << elapsed_seconds.count() << "s\n";
  file1<<"elapsed time Unified " << elapsed_seconds.count() << "s\n";
  string vtk_name = part_out_name_base+"/Results"+ ".txt";
  fstream file;
  file.open(vtk_name,std::ios_base::app);
  file<<"Unified Memory Variable_Block :"<<elapsed_seconds.count() << "\t"<<domain.cpu_x<<"\n";
  file.close();
  file1.close();
}
47ee561ae386a4bc70d963ee3a809f1e088b17f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "vec_add.h"
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int n = 256;
  int *a, *b, *c, *da, *db, *dc;
  a = (int*)malloc(n * sizeof(int));
  b = (int*)malloc(n * sizeof(int));
  c = (int*)malloc(n * sizeof(int));
  for(int i = 0; i < n; ++i)
    a[i] = b[i] = 1;
  if(hipSuccess != hipMalloc((void **)&da, n * sizeof(int))) {
    puts("Error");
  }
  hipMalloc((void **)&db, n * sizeof(int));
  hipMalloc((void **)&dc, n * sizeof(int));
  hipMemcpy(da, a, n * sizeof(int), hipMemcpyHostToDevice);
  hipMemcpy(db, b, n * sizeof(int), hipMemcpyHostToDevice);
  hipLaunchKernelGGL(( VecAdd), dim3(1), dim3(n), 0, 0, da, db, dc, n);
  hipMemcpy(c, dc, n * sizeof(int), hipMemcpyDeviceToHost);
  for(int i = 0; i < n; ++i) {
    if(c[i] != 2) {
      printf("Error\n");
      exit(1);
    }
  }
  hipFree(da);
  hipFree(db);
  hipFree(dc);
  free(a);
  free(b);
  free(c);
  return 0;
}
47ee561ae386a4bc70d963ee3a809f1e088b17f9.cu
#include "vec_add.h" #include <cuda.h> #include <cstdio> #include <cstdlib> int main() { int n = 256; int *a, *b, *c, *da, *db, *dc; a = (int*)malloc(n * sizeof(int)); b = (int*)malloc(n * sizeof(int)); c = (int*)malloc(n * sizeof(int)); for(int i = 0; i < n; ++i) a[i] = b[i] = 1; if(cudaSuccess != cudaMalloc((void **)&da, n * sizeof(int))) { puts("Error"); } cudaMalloc((void **)&db, n * sizeof(int)); cudaMalloc((void **)&dc, n * sizeof(int)); cudaMemcpy(da, a, n * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(db, b, n * sizeof(int), cudaMemcpyHostToDevice); VecAdd<<<1, n>>>(da, db, dc, n); cudaMemcpy(c, dc, n * sizeof(int), cudaMemcpyDeviceToHost); for(int i = 0; i < n; ++i) { if(c[i] != 2) { printf("Error\n"); exit(1); } } cudaFree(da); cudaFree(db); cudaFree(dc); free(a); free(b); free(c); return 0; }
1052d9a80ec368d3e9eeaf25bc1d77a994c6de8b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <onnxplugin/onnxplugin.hpp>
#include <cuda_fp16.hpp>

using namespace ONNXPlugin;

static __global__ void hswish_kernel_fp32(float* input, float* output, int edge) {
  KernelPositionBlock;
  float x = input[position];
  float a = x + 3;
  a = a < 0 ? 0 : (a >= 6 ? 6 : a);
  output[position] = x * a / 6;
}

// static __global__ void hswish_kernel_fp16(__half* input, __half* output, int edge) {
//   KernelPositionBlock;
//   __half _six = 6.0f;
//   __half _three = 3.0f;
//   __half x = input[position];
//   __half a = x + _three;
//   __half _zero = 0.0f;
//   a = a < _zero ? _zero : (a >= _six ? _six : a);
//   output[position] = x * a / _six;
// }

class HSwish : public TRTPlugin {
public:
  SetupPlugin(HSwish);

  virtual void config_finish() override{
    // INFO("init hswish config: %s", config_->info_.c_str());
    // INFO("weights = %d", config_->weights_.size());
    // for(int i = 0; i < config_->weights_.size(); ++i){
    //   auto& w = config_->weights_[i];
    //   if(w->type() == TRT::DataType::Float16){
    //     INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), float(w->at<__half>(0)));
    //   }else{
    //     INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), w->at<float>(0));
    //   }
    // }
  }

  virtual std::shared_ptr<LayerConfig> new_config() override{
    auto cfg = TRTPlugin::new_config();
    //cfg->support_dtype_set_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT};
    cfg->support_dtype_set_ = {nvinfer1::DataType::kFLOAT};
    return cfg;
  }

  virtual nvinfer1::DimsExprs getOutputDimensions(
    int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override{
    return inputs[0];
  }

  int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, hipStream_t stream) override{
    int count = inputs[0].count();
    auto grid = CUDATools::grid_dims(count);
    auto block = CUDATools::block_dims(count);
    if (config_->usage_dtype_ == TRT::DataType::Float) {
      hipLaunchKernelGGL(( hswish_kernel_fp32) , dim3(grid), dim3(block), 0, stream , inputs[0].ptr<float>(), outputs[0].ptr<float>(), count);
    }
    else if (config_->usage_dtype_ == TRT::DataType::Float16) {
      // hswish_kernel_fp16 <<<grid, block, 0, stream >>> (inputs[0].ptr<__half>(), outputs[0].ptr<__half>(), count);
      INFOF("not implement function");
    }
    else{
      INFOF("not implement function");
    }
    return 0;
  }
};

RegisterPlugin(HSwish);
1052d9a80ec368d3e9eeaf25bc1d77a994c6de8b.cu
#include <onnxplugin/onnxplugin.hpp>
#include <cuda_fp16.hpp>

using namespace ONNXPlugin;

static __global__ void hswish_kernel_fp32(float* input, float* output, int edge) {
  KernelPositionBlock;
  float x = input[position];
  float a = x + 3;
  a = a < 0 ? 0 : (a >= 6 ? 6 : a);
  output[position] = x * a / 6;
}

// static __global__ void hswish_kernel_fp16(__half* input, __half* output, int edge) {
//   KernelPositionBlock;
//   __half _six = 6.0f;
//   __half _three = 3.0f;
//   __half x = input[position];
//   __half a = x + _three;
//   __half _zero = 0.0f;
//   a = a < _zero ? _zero : (a >= _six ? _six : a);
//   output[position] = x * a / _six;
// }

class HSwish : public TRTPlugin {
public:
  SetupPlugin(HSwish);

  virtual void config_finish() override{
    // INFO("init hswish config: %s", config_->info_.c_str());
    // INFO("weights = %d", config_->weights_.size());
    // for(int i = 0; i < config_->weights_.size(); ++i){
    //   auto& w = config_->weights_[i];
    //   if(w->type() == TRT::DataType::Float16){
    //     INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), float(w->at<__half>(0)));
    //   }else{
    //     INFO("Weight[%d] shape is %s, dtype = %s, value[0] = %f", i, w->shape_string(), data_type_string(w->type()), w->at<float>(0));
    //   }
    // }
  }

  virtual std::shared_ptr<LayerConfig> new_config() override{
    auto cfg = TRTPlugin::new_config();
    //cfg->support_dtype_set_ = {nvinfer1::DataType::kHALF, nvinfer1::DataType::kFLOAT};
    cfg->support_dtype_set_ = {nvinfer1::DataType::kFLOAT};
    return cfg;
  }

  virtual nvinfer1::DimsExprs getOutputDimensions(
    int32_t outputIndex, const nvinfer1::DimsExprs* inputs, int32_t nbInputs, nvinfer1::IExprBuilder& exprBuilder) noexcept override{
    return inputs[0];
  }

  int enqueue(const std::vector<GTensor>& inputs, std::vector<GTensor>& outputs, const std::vector<GTensor>& weights, void* workspace, cudaStream_t stream) override{
    int count = inputs[0].count();
    auto grid = CUDATools::grid_dims(count);
    auto block = CUDATools::block_dims(count);
    if (config_->usage_dtype_ == TRT::DataType::Float) {
      hswish_kernel_fp32 <<<grid, block, 0, stream >>> (inputs[0].ptr<float>(), outputs[0].ptr<float>(), count);
    }
    else if (config_->usage_dtype_ == TRT::DataType::Float16) {
      // hswish_kernel_fp16 <<<grid, block, 0, stream >>> (inputs[0].ptr<__half>(), outputs[0].ptr<__half>(), count);
      INFOF("not implement function");
    }
    else{
      INFOF("not implement function");
    }
    return 0;
  }
};

RegisterPlugin(HSwish);
b691c877df44197bc01379f892daf6b3baab0cf4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>

using namespace std;
using namespace cv;

#define RED 2
#define GREEN 1
#define BLUE 0
#define MASK_WIDTH 3

__constant__ char d_Mask[MASK_WIDTH * MASK_WIDTH];

__device__ unsigned char clamp(int value){
  if(value < 0)
    value = 0;
  else if(value > 255)
    value = 255;
  return (unsigned char)value;
}

__global__ void convolutionCU(unsigned char *imageInput, int rows, int cols, unsigned char *imageOutput){
  int i = blockIdx.y*blockDim.y+threadIdx.y;
  int j = blockIdx.x*blockDim.x+threadIdx.x;
  int sum = 0;
  if (i < rows && j < cols) {
    int aux_cols = j - 1, aux_rows = i - 1;
    for (int k = 0; k < 3; k++) {
      for (int l = 0; l < 3; l++) {
        if(aux_rows >= 0 && aux_cols >= 0 && aux_rows < rows && aux_cols < cols)
          sum += d_Mask[(k*3) + l] * imageInput[(aux_rows*cols) + aux_cols];
        aux_cols++;
      }
      aux_rows++;
      aux_cols = j - 1;
    }
    imageOutput[(i * cols) + j] = clamp(sum);
  }
}

__global__ void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
  int row = blockIdx.y*blockDim.y+threadIdx.y;
  int col = blockIdx.x*blockDim.x+threadIdx.x;
  if((row < height) && (col < width)){
    imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
  }
}

__global__ void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
  int i = blockIdx.y*blockDim.y+threadIdx.y;
  int j = blockIdx.x*blockDim.x+threadIdx.x;
  if (i < rows && j < cols){
    imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
  }
}

void writeTimes(Size s, char* fileName, double Time){
  long size = s.width * s.height;
  FILE *f = fopen("global.time", "a");
  if (f == NULL)
    printf("Error opening file!\n");
  else {
    fprintf(f, "%ld %s %lf\n", size, fileName, Time);
  }
  fclose(f);
}

int main(int argc, char **argv) {
  if (argc != 2) {
    printf("Usage: Image path\n");
    return 1;
  }
  //---------> variable declarations
  hipError_t error = hipSuccess;
  //times
  clock_t start, end;
  double time_used;
  char* imageName = argv[1];
  //initial image
  unsigned char *h_ImageInit;
  unsigned char *d_ImageInit;
  //grayscale image
  unsigned char *d_imageGray;
  //images filtered in X and in Y
  unsigned char *d_Gx, *d_Gy;
  //final image
  unsigned char *h_G, *d_G;
  //device masks
  char h_XMask[] = {-1, 0, 1,-2, 0, 2,-1, 0, 1};
  char h_YMask[] = {-1,-2,-1, 0, 0, 0, 1, 2, 1};
  //load the initial image
  Mat image;
  image = imread(imageName, 1);
  if (!image.data) {
    printf("No image Data\n");
    return 1;
  }
  //read the image parameters
  Size s = image.size();
  int width = s.width;
  int height = s.height;
  int size = sizeof(unsigned char) * width * height * image.channels();
  int sizeGray = sizeof(unsigned char) * width * height;
  //---------> Allocating memory on the Host and the Device
  //initial image on the Host
  h_ImageInit = (unsigned char*)malloc(size);
  //final image on the host
  h_G = (unsigned char*)malloc(sizeGray);
  //---------> hipMalloc
  //initial image on the device
  error = hipMalloc((void**)&d_ImageInit,size);
  if (error != hipSuccess) {
    printf("Error allocating memory for d_imageInput\n");
    exit(-1);
  }
  //grayscale image on the device
  error = hipMalloc((void**)&d_imageGray, sizeGray);
  if (error != hipSuccess) {
    printf("Error allocating memory for d_imageGray\n");
    exit(-1);
  }
  error = hipMalloc((void**)&d_Gx, sizeGray);
  if (error != hipSuccess) {
    printf("Error allocating memory for d_Gx\n");
    exit(-1);
  }
  //Gy convolution image on the device
  error = hipMalloc((void**)&d_Gy, sizeGray);
  if (error != hipSuccess) {
    printf("Error allocating memory for d_Gy\n");
    exit(-1);
  }
  //final (Union) image on the device
  error = hipMalloc((void**)&d_G, sizeGray);
  if (error != hipSuccess) {
    printf("Error allocating memory for d_G\n");
    exit(-1);
  }
  //start the clock
  start = clock();
  //---------> CudaMemCpy
  //load the initial image
  h_ImageInit = image.data;
  error = hipMemcpy(d_ImageInit, h_ImageInit, size, hipMemcpyHostToDevice);
  if (error != hipSuccess) {
    printf("Error copiando imagen inicial de host a device\n");
    exit(-1);
  }
  //masks
  error = hipMemcpyToSymbol(d_Mask, h_YMask, 3*3*sizeof(char));
  if (error != hipSuccess) {
    printf("Error copiando mascara Y de host a constante\n");
    exit(-1);
  }
  //---------> Grayscale
  int blockSize = 32;
  dim3 dimBlock(blockSize, blockSize, 1);
  dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
  hipLaunchKernelGGL(( img2grayCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_ImageInit, width, height, d_imageGray);
  hipDeviceSynchronize();
  //---------> Convolutions
  // Convolution with the Y mask (result in d_Gy)
  hipLaunchKernelGGL(( convolutionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, height, width, d_Gy);
  hipDeviceSynchronize();
  //Copy the X mask data from the host to constant memory
  error = hipMemcpyToSymbol(d_Mask, h_XMask, 3*3*sizeof(char));
  if (error != hipSuccess) {
    printf("Error copiando mascara X de host a constante\n");
    exit(-1);
  }
  // Convolution with the X mask (result in d_Gx)
  hipLaunchKernelGGL(( convolutionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_imageGray, height, width, d_Gx);
  hipDeviceSynchronize();
  // Union of Gx and Gy ///// Sobel
  hipLaunchKernelGGL(( UnionCU), dim3(dimGrid),dim3(dimBlock), 0, 0, d_G, d_Gx, d_Gy, height, width);
  hipDeviceSynchronize();
  //result of the Union
  error = hipMemcpy(h_G, d_G, sizeGray, hipMemcpyDeviceToHost);
  if (error != hipSuccess) {
    printf("Error copiando resultado del device al host\n");
    exit(-1);
  }
  //get the final time
  end = clock();
  //create the resulting image
  Mat result_img;
  result_img.create(height, width, CV_8UC1);
  result_img.data = h_G;
  imwrite("imgResult.jpg", result_img);
  //compute the timings
  time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
  printf ("%lf \n",time_used);
  //free memory
  free(h_ImageInit);
  free(h_G);
  hipFree(d_ImageInit);
  hipFree(d_imageGray);
  hipFree(d_Mask);
  hipFree(d_Gx);
  hipFree(d_Gy);
  hipFree(d_G);
  return 0;
}
b691c877df44197bc01379f892daf6b3baab0cf4.cu
#include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<opencv2/opencv.hpp>

using namespace std;
using namespace cv;

#define RED 2
#define GREEN 1
#define BLUE 0
#define MASK_WIDTH 3

__constant__ char d_Mask[MASK_WIDTH * MASK_WIDTH];

__device__ unsigned char clamp(int value){
  if(value < 0)
    value = 0;
  else if(value > 255)
    value = 255;
  return (unsigned char)value;
}

__global__ void convolutionCU(unsigned char *imageInput, int rows, int cols, unsigned char *imageOutput){
  int i = blockIdx.y*blockDim.y+threadIdx.y;
  int j = blockIdx.x*blockDim.x+threadIdx.x;
  int sum = 0;
  if (i < rows && j < cols) {
    int aux_cols = j - 1, aux_rows = i - 1;
    for (int k = 0; k < 3; k++) {
      for (int l = 0; l < 3; l++) {
        if(aux_rows >= 0 && aux_cols >= 0 && aux_rows < rows && aux_cols < cols)
          sum += d_Mask[(k*3) + l] * imageInput[(aux_rows*cols) + aux_cols];
        aux_cols++;
      }
      aux_rows++;
      aux_cols = j - 1;
    }
    imageOutput[(i * cols) + j] = clamp(sum);
  }
}

__global__ void img2grayCU(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
  int row = blockIdx.y*blockDim.y+threadIdx.y;
  int col = blockIdx.x*blockDim.x+threadIdx.x;
  if((row < height) && (col < width)){
    imageOutput[row*width+col] = imageInput[(row*width+col)*3+RED]*0.299 + imageInput[(row*width+col)*3+GREEN]*0.587 + imageInput[(row*width+col)*3+BLUE]*0.114;
  }
}

__global__ void UnionCU(unsigned char *imageOutput, unsigned char *Gx, unsigned char *Gy, int rows, int cols){
  int i = blockIdx.y*blockDim.y+threadIdx.y;
  int j = blockIdx.x*blockDim.x+threadIdx.x;
  if (i < rows && j < cols){
    imageOutput[(i * cols) + j] = sqrtf((Gx[(i * cols) + j] * Gx[(i * cols) + j]) + (Gy[(i * cols) + j] * Gy[(i * cols) + j]) );
  }
}

void writeTimes(Size s, char* fileName, double Time){
  long size = s.width * s.height;
  FILE *f = fopen("global.time", "a");
  if (f == NULL)
    printf("Error opening file!\n");
  else {
    fprintf(f, "%ld %s %lf\n", size, fileName, Time);
  }
  fclose(f);
}

int main(int argc, char **argv) {
  if (argc != 2) {
    printf("Usage: Image path\n");
    return 1;
  }
  //---------> variable declarations
  cudaError_t error = cudaSuccess;
  //times
  clock_t start, end;
  double time_used;
  char* imageName = argv[1];
  //initial image
  unsigned char *h_ImageInit;
  unsigned char *d_ImageInit;
  //grayscale image
  unsigned char *d_imageGray;
  //images filtered in X and in Y
  unsigned char *d_Gx, *d_Gy;
  //final image
  unsigned char *h_G, *d_G;
  //device masks
  char h_XMask[] = {-1, 0, 1,-2, 0, 2,-1, 0, 1};
  char h_YMask[] = {-1,-2,-1, 0, 0, 0, 1, 2, 1};
  //load the initial image
  Mat image;
  image = imread(imageName, 1);
  if (!image.data) {
    printf("No image Data\n");
    return 1;
  }
  //read the image parameters
  Size s = image.size();
  int width = s.width;
  int height = s.height;
  int size = sizeof(unsigned char) * width * height * image.channels();
  int sizeGray = sizeof(unsigned char) * width * height;
  //---------> Allocating memory on the Host and the Device
  //initial image on the Host
  h_ImageInit = (unsigned char*)malloc(size);
  //final image on the host
  h_G = (unsigned char*)malloc(sizeGray);
  //---------> cudaMalloc
  //initial image on the device
  error = cudaMalloc((void**)&d_ImageInit,size);
  if (error != cudaSuccess) {
    printf("Error allocating memory for d_imageInput\n");
    exit(-1);
  }
  //grayscale image on the device
  error = cudaMalloc((void**)&d_imageGray, sizeGray);
  if (error != cudaSuccess) {
    printf("Error allocating memory for d_imageGray\n");
    exit(-1);
  }
  error = cudaMalloc((void**)&d_Gx, sizeGray);
  if (error != cudaSuccess) {
    printf("Error allocating memory for d_Gx\n");
    exit(-1);
  }
  //Gy convolution image on the device
  error = cudaMalloc((void**)&d_Gy, sizeGray);
  if (error != cudaSuccess) {
    printf("Error allocating memory for d_Gy\n");
    exit(-1);
  }
  //final (Union) image on the device
  error = cudaMalloc((void**)&d_G, sizeGray);
  if (error != cudaSuccess) {
    printf("Error allocating memory for d_G\n");
    exit(-1);
  }
  //start the clock
  start = clock();
  //---------> CudaMemCpy
  //load the initial image
  h_ImageInit = image.data;
  error = cudaMemcpy(d_ImageInit, h_ImageInit, size, cudaMemcpyHostToDevice);
  if (error != cudaSuccess) {
    printf("Error copiando imagen inicial de host a device\n");
    exit(-1);
  }
  //masks
  error = cudaMemcpyToSymbol(d_Mask, h_YMask, 3*3*sizeof(char));
  if (error != cudaSuccess) {
    printf("Error copiando mascara Y de host a constante\n");
    exit(-1);
  }
  //---------> Grayscale
  int blockSize = 32;
  dim3 dimBlock(blockSize, blockSize, 1);
  dim3 dimGrid(ceil(width/float(blockSize)), ceil(height/float(blockSize)), 1);
  img2grayCU<<<dimGrid,dimBlock>>>(d_ImageInit, width, height, d_imageGray);
  cudaDeviceSynchronize();
  //---------> Convolutions
  // Convolution with the Y mask (result in d_Gy)
  convolutionCU<<<dimGrid,dimBlock>>>(d_imageGray, height, width, d_Gy);
  cudaDeviceSynchronize();
  //Copy the X mask data from the host to constant memory
  error = cudaMemcpyToSymbol(d_Mask, h_XMask, 3*3*sizeof(char));
  if (error != cudaSuccess) {
    printf("Error copiando mascara X de host a constante\n");
    exit(-1);
  }
  // Convolution with the X mask (result in d_Gx)
  convolutionCU<<<dimGrid,dimBlock>>>(d_imageGray, height, width, d_Gx);
  cudaDeviceSynchronize();
  // Union of Gx and Gy ///// Sobel
  UnionCU<<<dimGrid,dimBlock>>>(d_G, d_Gx, d_Gy, height, width);
  cudaDeviceSynchronize();
  //result of the Union
  error = cudaMemcpy(h_G, d_G, sizeGray, cudaMemcpyDeviceToHost);
  if (error != cudaSuccess) {
    printf("Error copiando resultado del device al host\n");
    exit(-1);
  }
  //get the final time
  end = clock();
  //create the resulting image
  Mat result_img;
  result_img.create(height, width, CV_8UC1);
  result_img.data = h_G;
  imwrite("imgResult.jpg", result_img);
  //compute the timings
  time_used = ((double) (end - start)) /CLOCKS_PER_SEC;
  printf ("%lf \n",time_used);
  //free memory
  free(h_ImageInit);
  free(h_G);
  cudaFree(d_ImageInit);
  cudaFree(d_imageGray);
  cudaFree(d_Mask);
  cudaFree(d_Gx);
  cudaFree(d_Gy);
  cudaFree(d_G);
  return 0;
}
c667a4c0699d3929a155764821a94c9ae9a43c32.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7) {
  for (int i=0; i < var_1; ++i) {
    if (comp > var_4 - +0.0f) {
      for (int i=0; i < var_2; ++i) {
        for (int i=0; i < var_3; ++i) {
          comp = +1.7385E-44f + -1.3910E34f;
          comp = +1.4863E-36f + var_5 - +1.0883E34f;
          comp += (-0.0f + -1.9236E-42f * ceilf(var_6 * -1.1919E-44f + var_7 + -1.5851E34f));
        }
      }
    }
  }
  printf("%.17g\n", comp);
}

float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);

  hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
  hipDeviceSynchronize();

  return 0;
}
c667a4c0699d3929a155764821a94c9ae9a43c32.cu
/* This is an automatically generated test. Do not modify */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7) {
  for (int i=0; i < var_1; ++i) {
    if (comp > var_4 - +0.0f) {
      for (int i=0; i < var_2; ++i) {
        for (int i=0; i < var_3; ++i) {
          comp = +1.7385E-44f + -1.3910E34f;
          comp = +1.4863E-36f + var_5 - +1.0883E34f;
          comp += (-0.0f + -1.9236E-42f * ceilf(var_6 * -1.1919E-44f + var_7 + -1.5851E34f));
        }
      }
    }
  }
  printf("%.17g\n", comp);
}

float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8);
  cudaDeviceSynchronize();

  return 0;
}
d5ddcb1f98b985f91c0c5da3a3a2cf9881324384.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

typedef struct bmpFileHeaderStruct
{
  /* 2 bytes of identification */
  uint32_t size;        /* File size */
  uint16_t resv1;       /* Reserved */
  uint16_t resv2;       /* Reserved */
  uint32_t offset;      /* Offset to the image data */
} bmpFileHeader;

typedef struct bmpInfoHeaderStruct
{
  uint32_t headersize;  /* Header size */
  uint32_t width;       /* Width */
  uint32_t height;      /* Height */
  uint16_t planes;      /* Color planes (always 1) */
  uint16_t bpp;         /* Bits per pixel */
  uint32_t compress;    /* Compression */
  uint32_t imgsize;     /* Size of the image data */
  uint32_t bpmx;        /* X resolution in bits per meter */
  uint32_t bpmy;        /* Y resolution in bits per meter */
  uint32_t colors;      /* Colors used in the palette */
  uint32_t imxtcolors;  /* Important colors. 0 if all are important */
} bmpInfoHeader;

__global__ void BWkernel(unsigned char *img_device, uint32_t n)
{
  float color;
  color = 0.0f;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
  {
    color += img_device[i*3 + 0] * 0.114;
    color += img_device[i*3 + 1] * 0.587;
    color += img_device[i*3 + 2] * 0.299;
    color /= 3;
    img_device[i*3 + 0] = color;
    img_device[i*3 + 1] = color;
    img_device[i*3 + 2] = color;
  }
}
d5ddcb1f98b985f91c0c5da3a3a2cf9881324384.cu
#include "includes.h" typedef struct bmpFileHeaderStruct { /* 2 bytes de identificación */ uint32_t size; /* Tamaño del archivo */ uint16_t resv1; /* Reservado */ uint16_t resv2; /* Reservado */ uint32_t offset; /* Offset hasta hasta los datos de imagen */ } bmpFileHeader; typedef struct bmpInfoHeaderStruct { uint32_t headersize; /* Tamaño de la cabecera */ uint32_t width; /* Ancho */ uint32_t height; /* Alto */ uint16_t planes; /* Planos de color (Siempre 1) */ uint16_t bpp; /* bits por pixel */ uint32_t compress; /* compresion */ uint32_t imgsize; /* tamaño de los datos de imagen */ uint32_t bpmx; /* Resolucion X en bits por metro */ uint32_t bpmy; /* Resolucion Y en bits por metro */ uint32_t colors; /* colors used en la paleta */ uint32_t imxtcolors; /* Colores importantes. 0 si son todos */ } bmpInfoHeader; __global__ void BWkernel(unsigned char *img_device, uint32_t n) { float color; color = 0.0f; int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { color += img_device[i*3 + 0] * 0.114; color += img_device[i*3 + 1] * 0.587; color += img_device[i*3 + 2] * 0.299; color /= 3; img_device[i*3 + 0] = color; img_device[i*3 + 1] = color; img_device[i*3 + 2] = color; } }
4691c537bffc21e8c184df63c027ae2083cb901f.hip
// !!! This is a file automatically generated by hipify!!!
//Example 1. Application Using C and cuBLAS: 1-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include "rocblas.h"

#define M 6
#define N 5
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))

static __inline__ void modify (hipblasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
  hipblasSscal (handle, n-q+1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
  hipblasSscal (handle, ldm-p+1, &beta, &m[IDX2F(p,q,ldm)], 1);
}

int main (void){
  hipError_t cudaStat;
  hipblasStatus_t stat;
  hipblasHandle_t handle;
  int i, j;
  float* devPtrA;
  float* a = 0;
  a = (float *)malloc (M * N * sizeof (*a));
  if (!a) {
    printf ("host memory allocation failed");
    return EXIT_FAILURE;
  }
  for (j = 1; j <= N; j++) {
    for (i = 1; i <= M; i++) {
      a[IDX2F(i,j,M)] = (float)((i-1) * N + j);
    }
  }
  cudaStat = hipMalloc ((void**)&devPtrA, M*N*sizeof(*a));
  if (cudaStat != hipSuccess) {
    printf ("device memory allocation failed");
    return EXIT_FAILURE;
  }
  stat = hipblasCreate(&handle);
  if (stat != HIPBLAS_STATUS_SUCCESS) {
    printf ("CUBLAS initialization failed\n");
    return EXIT_FAILURE;
  }
  stat = hipblasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
  if (stat != HIPBLAS_STATUS_SUCCESS) {
    printf ("data download failed");
    hipFree (devPtrA);
    hipblasDestroy(handle);
    return EXIT_FAILURE;
  }
  modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
  stat = hipblasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
  if (stat != HIPBLAS_STATUS_SUCCESS) {
    printf ("data upload failed");
    hipFree (devPtrA);
    hipblasDestroy(handle);
    return EXIT_FAILURE;
  }
  hipFree (devPtrA);
  hipblasDestroy(handle);
  for (j = 1; j <= N; j++) {
    for (i = 1; i <= M; i++) {
      printf ("%7.0f", a[IDX2F(i,j,M)]);
    }
    printf ("\n");
  }
  free(a);
  return EXIT_SUCCESS;
}
4691c537bffc21e8c184df63c027ae2083cb901f.cu
//Example 1. Application Using C and cuBLAS: 1-based indexing
//-----------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include "cublas_v2.h"

#define M 6
#define N 5
#define IDX2F(i,j,ld) ((((j)-1)*(ld))+((i)-1))

static __inline__ void modify (cublasHandle_t handle, float *m, int ldm, int n, int p, int q, float alpha, float beta){
  cublasSscal (handle, n-q+1, &alpha, &m[IDX2F(p,q,ldm)], ldm);
  cublasSscal (handle, ldm-p+1, &beta, &m[IDX2F(p,q,ldm)], 1);
}

int main (void){
  cudaError_t cudaStat;
  cublasStatus_t stat;
  cublasHandle_t handle;
  int i, j;
  float* devPtrA;
  float* a = 0;
  a = (float *)malloc (M * N * sizeof (*a));
  if (!a) {
    printf ("host memory allocation failed");
    return EXIT_FAILURE;
  }
  for (j = 1; j <= N; j++) {
    for (i = 1; i <= M; i++) {
      a[IDX2F(i,j,M)] = (float)((i-1) * N + j);
    }
  }
  cudaStat = cudaMalloc ((void**)&devPtrA, M*N*sizeof(*a));
  if (cudaStat != cudaSuccess) {
    printf ("device memory allocation failed");
    return EXIT_FAILURE;
  }
  stat = cublasCreate(&handle);
  if (stat != CUBLAS_STATUS_SUCCESS) {
    printf ("CUBLAS initialization failed\n");
    return EXIT_FAILURE;
  }
  stat = cublasSetMatrix (M, N, sizeof(*a), a, M, devPtrA, M);
  if (stat != CUBLAS_STATUS_SUCCESS) {
    printf ("data download failed");
    cudaFree (devPtrA);
    cublasDestroy(handle);
    return EXIT_FAILURE;
  }
  modify (handle, devPtrA, M, N, 2, 3, 16.0f, 12.0f);
  stat = cublasGetMatrix (M, N, sizeof(*a), devPtrA, M, a, M);
  if (stat != CUBLAS_STATUS_SUCCESS) {
    printf ("data upload failed");
    cudaFree (devPtrA);
    cublasDestroy(handle);
    return EXIT_FAILURE;
  }
  cudaFree (devPtrA);
  cublasDestroy(handle);
  for (j = 1; j <= N; j++) {
    for (i = 1; i <= M; i++) {
      printf ("%7.0f", a[IDX2F(i,j,M)]);
    }
    printf ("\n");
  }
  free(a);
  return EXIT_SUCCESS;
}
ddd5bcb17a329eaf81c726183621438a3122dff8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "mathutil_cuda_kernel.h"

dim3 cuda_gridsize(int n){
  int k = (n - 1) / BLOCK + 1;
  int x = k;
  int y = 1;
  if(x > 65535) {
    x = ceil(sqrt(k));
    y = (n-1) / (x * BLOCK) + 1;
  }
  dim3 d(x, y, 1);
  return d;
}

__global__ void broadcast_sum_kernel(float *a, float *b, int x, int y, int size){
  int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
  if(i >= size) return;
  int j = i % y;
  i /= y;
  int k = i % x;
  a[IDX2D(k, j, y)] += b[k];
}

void broadcast_sum_cuda(float *a, float *b, int x, int y, hipStream_t stream){
  int size = x * y;
  hipError_t err;
  hipLaunchKernelGGL(( broadcast_sum_kernel), dim3(cuda_gridsize(size)), dim3(BLOCK), 0, stream, a, b, x, y, size);
  err = hipGetLastError();
  if (hipSuccess != err){
    fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err));
    exit(-1);
  }
}
ddd5bcb17a329eaf81c726183621438a3122dff8.cu
#include <curand.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include "mathutil_cuda_kernel.h"

dim3 cuda_gridsize(int n){
  int k = (n - 1) / BLOCK + 1;
  int x = k;
  int y = 1;
  if(x > 65535) {
    x = ceil(sqrt(k));
    y = (n-1) / (x * BLOCK) + 1;
  }
  dim3 d(x, y, 1);
  return d;
}

__global__ void broadcast_sum_kernel(float *a, float *b, int x, int y, int size){
  int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
  if(i >= size) return;
  int j = i % y;
  i /= y;
  int k = i % x;
  a[IDX2D(k, j, y)] += b[k];
}

void broadcast_sum_cuda(float *a, float *b, int x, int y, cudaStream_t stream){
  int size = x * y;
  cudaError_t err;
  broadcast_sum_kernel<<<cuda_gridsize(size), BLOCK, 0, stream>>>(a, b, x, y, size);
  err = cudaGetLastError();
  if (cudaSuccess != err){
    fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
    exit(-1);
  }
}
bd35c9298956a9321d51cd068e83f5859f91b30f.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p2.cu -o assignment5-p2

#include <cmath>
#include <cstdint>
#include <iostream>
#include <sys/time.h>

#define THRESHOLD (0.000001)
#define SIZE1 4096
#define SIZE2 4097
#define ITER 100
#define BLOCK_SIZE 16

using namespace std;

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
  if (code != hipSuccess)
  {
    fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

__global__ void kernel1(double* A) {
  // SB: Write the first kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE1 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE1; i++) {
        A[i*SIZE1 + j + 1] = A[(i - 1)*SIZE1 + j + 1] + A[i*SIZE1 + j + 1];
      }
    }
  }
}

__global__ void kernel2(double* A) {
  // SB: Write the second kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE2 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE2; i++) {
        A[i*SIZE2 + j + 1] = A[(i - 1)*SIZE2 + j + 1] + A[i*SIZE2 + j + 1];
      }
    }
  }
}

__host__ void serial(double** A) {
  for (int k = 0; k < ITER; k++) {
    for (int i = 1; i < SIZE1; i++) {
      for (int j = 0; j < SIZE1 - 1; j++) {
        A[i][j + 1] = A[i - 1][j + 1] + A[i][j + 1];
      }
    }
  }
}

__host__ void check_result(double** w_ref, double* w_opt, uint64_t size) {
  double maxdiff = 0.0, this_diff = 0.0;
  int numdiffs = 0;
  for (uint64_t i = 0; i < size; i++) {
    for (uint64_t j = 0; j < size; j++) {
      this_diff = w_ref[i][j] - w_opt[i*size + j];
      if (fabs(this_diff) > THRESHOLD) {
        numdiffs++;
        if (this_diff > maxdiff) maxdiff = this_diff;
      }
    }
  }
  if (numdiffs > 0) {
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl;
  } else {
    cout << "No differences found between base and test versions" << endl;
  }
}

__host__ double rtclock() {
  struct timezone Tzp;
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, &Tzp);
  if (stat != 0) {
    cout << "Error return from gettimeofday: " << stat << "\n";
  }
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

int main() {
  double** A_ser = new double*[SIZE1];
  double* A_k1 = new double[SIZE1*SIZE1];
  double* A_k2 = new double[SIZE2*SIZE2];
  for (int i = 0; i < SIZE1; i++) {
    A_ser[i] = new double[SIZE1];
  }
  for (int i = 0; i < SIZE1; i++) {
    for (int j = 0; j < SIZE1; j++) {
      A_ser[i][j] = i + j;
      A_k1[i*SIZE1 + j] = i + j;
    }
  }
  for (int i = 0; i < SIZE2; i++) {
    for (int j = 0; j < SIZE2; j++) {
      A_k2[i*SIZE2 + j] = i + j;
    }
  }

  double clkbegin, clkend;
  double t;

  clkbegin = rtclock();
  serial(A_ser);
  clkend = rtclock();
  t = clkend - clkbegin;
  cout << "Serial code on CPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9) << " GFLOPS; Time = " << t * 1000 << " msec" << endl;

  hipEvent_t start, end;
  gpuErrchk( hipEventCreate(&start) );
  gpuErrchk( hipEventCreate(&end) );

  // SB: Write your first GPU kernel here
  double* A_k1_c;
  gpuErrchk( hipMalloc((void**)&A_k1_c, SIZE1*SIZE1*sizeof(double)) );
  gpuErrchk( hipEventRecord(start, 0) );
  gpuErrchk( hipMemcpy(A_k1_c, A_k1, SIZE1*SIZE1*sizeof(double), hipMemcpyHostToDevice) );
  hipLaunchKernelGGL(( kernel1), dim3(4), dim3(1024), 0, 0, A_k1_c);
  gpuErrchk( hipPeekAtLastError() );
  gpuErrchk( hipMemcpy(A_k1, A_k1_c, SIZE1*SIZE1*sizeof(double), hipMemcpyDeviceToHost) );
  gpuErrchk( hipEventRecord(end, 0) );
  gpuErrchk( hipDeviceSynchronize() );
  float kernel_time = 0;
  gpuErrchk( hipEventElapsedTime(&kernel_time, start, end) );
  check_result(A_ser, A_k1, SIZE1);
  cout << "Kernel 1 on GPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9) << " GFLOPS; Time = " << kernel_time << " msec" << endl;

  // SB: Write your second GPU kernel here
  double* A_k2_c;
  gpuErrchk( hipMalloc((void**)&A_k2_c, SIZE2*SIZE2*sizeof(double)) );
  gpuErrchk( hipEventRecord(start, 0) );
  gpuErrchk( hipMemcpy(A_k2_c, A_k2, SIZE2*SIZE2*sizeof(double), hipMemcpyHostToDevice) );
  hipLaunchKernelGGL(( kernel2), dim3(4), dim3(1024), 0, 0, A_k2_c);
  gpuErrchk( hipPeekAtLastError() );
  gpuErrchk( hipMemcpy(A_k2, A_k2_c, SIZE2*SIZE2*sizeof(double), hipMemcpyDeviceToHost) );
  gpuErrchk( hipEventRecord(end, 0) );
  gpuErrchk( hipDeviceSynchronize() );
  kernel_time = 0;
  gpuErrchk( hipEventElapsedTime(&kernel_time, start, end) );
  // check_result(A_ser, A_k2, SIZE1);
  cout << "Kernel 2 on GPU: " << (1.0 * SIZE2 * SIZE2 * ITER / t / 1.0e9) << " GFLOPS; Time = " << kernel_time << " msec" << endl;

  gpuErrchk( hipFree(A_k1_c) );
  gpuErrchk( hipFree(A_k2_c) );
  free(A_ser);
  free(A_k1);
  free(A_k2);
  return EXIT_SUCCESS;
}
bd35c9298956a9321d51cd068e83f5859f91b30f.cu
// Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p2.cu -o assignment5-p2

#include <cmath>
#include <cstdint>
#include <iostream>
#include <sys/time.h>

#define THRESHOLD (0.000001)
#define SIZE1 4096
#define SIZE2 4097
#define ITER 100
#define BLOCK_SIZE 16

using namespace std;

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

__global__ void kernel1(double* A) {
  // SB: Write the first kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE1 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE1; i++) {
        A[i*SIZE1 + j + 1] = A[(i - 1)*SIZE1 + j + 1] + A[i*SIZE1 + j + 1];
      }
    }
  }
}

__global__ void kernel2(double* A) {
  // SB: Write the second kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE2 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE2; i++) {
        A[i*SIZE2 + j + 1] = A[(i - 1)*SIZE2 + j + 1] + A[i*SIZE2 + j + 1];
      }
    }
  }
}

__host__ void serial(double** A) {
  for (int k = 0; k < ITER; k++) {
    for (int i = 1; i < SIZE1; i++) {
      for (int j = 0; j < SIZE1 - 1; j++) {
        A[i][j + 1] = A[i - 1][j + 1] + A[i][j + 1];
      }
    }
  }
}

__host__ void check_result(double** w_ref, double* w_opt, uint64_t size) {
  double maxdiff = 0.0, this_diff = 0.0;
  int numdiffs = 0;
  for (uint64_t i = 0; i < size; i++) {
    for (uint64_t j = 0; j < size; j++) {
      this_diff = w_ref[i][j] - w_opt[i*size + j];
      if (fabs(this_diff) > THRESHOLD) {
        numdiffs++;
        if (this_diff > maxdiff) maxdiff = this_diff;
      }
    }
  }
  if (numdiffs > 0) {
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff << endl;
  } else {
    cout << "No differences found between base and test versions" << endl;
  }
}

__host__ double rtclock() {
  struct timezone Tzp;
  struct timeval Tp;
  int stat;
  stat = gettimeofday(&Tp, &Tzp);
  if (stat != 0) {
    cout << "Error return from gettimeofday: " << stat << "\n";
  }
  return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

int main() {
  double** A_ser = new double*[SIZE1];
  double* A_k1 = new double[SIZE1*SIZE1];
  double* A_k2 = new double[SIZE2*SIZE2];
  for (int i = 0; i < SIZE1; i++) {
    A_ser[i] = new double[SIZE1];
  }
  for (int i = 0; i < SIZE1; i++) {
    for (int j = 0; j < SIZE1; j++) {
      A_ser[i][j] = i + j;
      A_k1[i*SIZE1 + j] = i + j;
    }
  }
  for (int i = 0; i < SIZE2; i++) {
    for (int j = 0; j < SIZE2; j++) {
      A_k2[i*SIZE2 + j] = i + j;
    }
  }

  double clkbegin, clkend;
  double t;

  clkbegin = rtclock();
  serial(A_ser);
  clkend = rtclock();
  t = clkend - clkbegin;
  cout << "Serial code on CPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9) << " GFLOPS; Time = " << t * 1000 << " msec" << endl;

  cudaEvent_t start, end;
  gpuErrchk( cudaEventCreate(&start) );
  gpuErrchk( cudaEventCreate(&end) );

  // SB: Write your first GPU kernel here
  double* A_k1_c;
  gpuErrchk( cudaMalloc((void**)&A_k1_c, SIZE1*SIZE1*sizeof(double)) );
  gpuErrchk( cudaEventRecord(start, 0) );
  gpuErrchk( cudaMemcpy(A_k1_c, A_k1, SIZE1*SIZE1*sizeof(double), cudaMemcpyHostToDevice) );
  kernel1<<<4, 1024>>>(A_k1_c);
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaMemcpy(A_k1, A_k1_c, SIZE1*SIZE1*sizeof(double), cudaMemcpyDeviceToHost) );
  gpuErrchk( cudaEventRecord(end, 0) );
  gpuErrchk( cudaDeviceSynchronize() );
  float kernel_time = 0;
  gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
  check_result(A_ser, A_k1, SIZE1);
  cout << "Kernel 1 on GPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9) << " GFLOPS; Time = " << kernel_time << " msec" << endl;

  // SB: Write your second GPU kernel here
  double* A_k2_c;
  gpuErrchk( cudaMalloc((void**)&A_k2_c, SIZE2*SIZE2*sizeof(double)) );
  gpuErrchk( cudaEventRecord(start, 0) );
  gpuErrchk( cudaMemcpy(A_k2_c, A_k2, SIZE2*SIZE2*sizeof(double), cudaMemcpyHostToDevice) );
  kernel2<<<4, 1024>>>(A_k2_c);
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaMemcpy(A_k2, A_k2_c, SIZE2*SIZE2*sizeof(double), cudaMemcpyDeviceToHost) );
  gpuErrchk( cudaEventRecord(end, 0) );
  gpuErrchk( cudaDeviceSynchronize() );
  kernel_time = 0;
  gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
  // check_result(A_ser, A_k2, SIZE1);
  cout << "Kernel 2 on GPU: " << (1.0 * SIZE2 * SIZE2 * ITER / t / 1.0e9) << " GFLOPS; Time = " << kernel_time << " msec" << endl;

  gpuErrchk( cudaFree(A_k1_c) );
  gpuErrchk( cudaFree(A_k2_c) );
  free(A_ser);
  free(A_k1);
  free(A_k2);
  return EXIT_SUCCESS;
}
f569971631bdf4aa48a198be75f440ca08c47858.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * Parallels and Distributed Systems Exercise 3
 * v2. CUDA modified Ising model evolution; grid and block compute the magnetic moments.
 * Author: Michael Karatzas
 * AEM: 9137
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "ising.h"
#include "essentials.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "hip/hip_runtime_api.h"

//The max threads per block for my gpu (gt 540m) is 1024 so it must be BLOCK_DIM_X* BLOCK_DIM_Y<=1024
//(Preferably: set BLOCK_DIM_X and BLOCK_DIM_Y to a multiple of 4)
#define BLOCK_DIM_X 24
#define BLOCK_DIM_Y 24
#define GRID_DIM_X 4
#define GRID_DIM_Y 4

//Functions'-kernels' Declarations
__global__ void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag);
__device__ __forceinline__ void getTheSpin(int * Lat,int * newLat, double * weights , int n, int rowIndex, int colIndex, int * flag);

///Functions'-kernels' Definitions
void ising( int *G, double *w, int k, int n){

  //Flag to indicate that there were no changes in the lattice during a step, in order to terminate the evolution.
  int no_changes_flag;

  int * d_G, *d_secondG, *d_no_changes_flag;
  double * d_w;

  //Allocate memory for the no-change flag in the Device
  if( hipMalloc(&d_no_changes_flag, (size_t)sizeof(int)) != hipSuccess){
    printf("Couldn't allocate memory in device (GPU) !");
    exit(1);
  }

  //Allocate memory and "transfer" the G Matrix in the Device
  if( hipMalloc((void **)&d_G, (size_t)sizeof(int)*n*n) != hipSuccess){
    printf("Couldn't allocate memory in device (GPU) !");
    exit(1);
  }
  hipMemcpy(d_G, G, (size_t)sizeof(int)*n*n, hipMemcpyHostToDevice);

  //Allocate memory and "transfer" the Weights' Matrix in the Device
  if( hipMalloc((void **)&d_w, (size_t)sizeof(double)*5*5) != hipSuccess){
    printf("Couldn't allocate memory in device (GPU) !");
    exit(1);
  }
  hipMemcpy(d_w, w, (size_t)sizeof(double)*5*5, hipMemcpyHostToDevice);

  //Allocate memory for the second G matrix only in GPU(device)
  if(hipMalloc((void **)&d_secondG, (size_t)sizeof(int)*n*n) != hipSuccess){
    printf("Couldn't allocate memory in device (GPU) !");
    exit(1);
  }

  //grid and block dimensions so that one thread computes a set of moments.
  dim3 dimBlock(BLOCK_DIM_X,BLOCK_DIM_Y);
  dim3 dimGrid(GRID_DIM_X,GRID_DIM_Y);

  //Evolving the model for k steps
  for(int i=0 ; i<k ;i++){
    /*no_changes_flag=1 indicates no change in the lattice; if there are changes,
      the nextStateCalculation() kernel will update its value.*/
    no_changes_flag=1;
    hipMemcpy(d_no_changes_flag, &no_changes_flag, (size_t)sizeof(int), hipMemcpyHostToDevice);

    //calling the nextStateCalculation() kernel
    hipLaunchKernelGGL(( nextStateCalculation), dim3(dimGrid),dim3(dimBlock), 0, 0, d_G,d_secondG,d_w,n,d_no_changes_flag);
    hipDeviceSynchronize();

    //Swapping the pointers between the two Matrices in device
    pointer_swap(&d_G,&d_secondG);

    //The host gets the value of the no-changes flag as an indication of whether changes happened during the step.
    hipMemcpy(&no_changes_flag, d_no_changes_flag, (size_t)sizeof(int), hipMemcpyDeviceToHost);

    //If there are no changes in the lattice we stop evolving the model
    if(no_changes_flag){
      break;
    }
  }

  //Passing updated values of the G matrix to the host(CPU).
  hipMemcpy(G,d_G,(size_t)sizeof(int)*n*n,hipMemcpyDeviceToHost);

  //Freeing GPU memory that is no longer needed, to avoid memory leaks.
  hipFree(d_G);
  hipFree(d_secondG);
  hipFree(d_w);
}

__global__ void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag){
  //The step of each thread
  int strideX = blockDim.x *gridDim.x ;
  int strideY = blockDim.y *gridDim.y ;

  //The unique global indices of the threads in the grid
  int index_X = threadIdx.x +blockDim.x*blockIdx.x;
  int index_Y = threadIdx.y +blockDim.y*blockIdx.y;

  //Each thread loops in order to compute the spin of its own points
  for(int i=index_Y;i<n ;i+=strideY){
    for(int j=index_X; j<n;j+=strideX){
      getTheSpin(Gptr,newMat,w,n,i,j,flag);
    }
  }
}

__device__ __forceinline__ void getTheSpin(int * Lat,int * newLat, double * weights , int n, int rowIndex, int colIndex, int * flag){
  double total=0;
  int idxR,idxC;

  //Calculating the total influence for a certain spot
  for(int i=rowIndex-2;i<rowIndex+3;i++ ){
    for(int j=colIndex-2;j<colIndex+3;j++ ){
      if((i==rowIndex) && (j==colIndex))
        continue;

      //using modulus arithmetic to handle the boundary conditions
      //Getting the positive modulus
      idxR= (i + n) % n ;
      idxC= (j + n) % n ;

      //Total influence update
      total+=Lat[ idxR*n + idxC] *weights[(2+i-rowIndex)*5 + (2+j-colIndex)];
    }
  }

  //Checking the conditions in order to get the next state spin
  //if (total == 0), taking into account possible floating point errors
  if( (total<1e-6) && (total>(-1e-6)) ){
    newLat[rowIndex*n+colIndex]=Lat[rowIndex*n+colIndex];
  }
  //if a change happens at a certain spot we update the no-change flag's value to 0.
  else if(total<0){
    //Checking if there is a change at this certain spot
    if(Lat[rowIndex*n+colIndex]!=1)
      *flag=0;
    newLat[rowIndex*n+colIndex]=-1;
  }
  else if(total>0){
    //Checking if there is a change at this certain spot
    if(Lat[rowIndex*n+colIndex]!=-1)
      *flag=0;
    newLat[rowIndex*n+colIndex]=1;
  }
}
f569971631bdf4aa48a198be75f440ca08c47858.cu
/* * Parallels and Distributed Systems Exercise 3 * v2. CUDA modified ising model evolution ,grid and block computes the magnetic moments. * Author:Michael Karatzas * AEM:9137 */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "ising.h" #include "essentials.h" #include "cuda.h" #include "cuda_runtime.h" #include "cuda_runtime_api.h" //The max threads per block for my gpu (gt 540m) is 1024 so it must be BLOCK_DIM_X* BLOCK_DIM_Y<=1024 //(Preferably:set BLOCK_DIM_X and BLOCK_DIM_Y a multiple of 4) #define BLOCK_DIM_X 24 #define BLOCK_DIM_Y 24 #define GRID_DIM_X 4 #define GRID_DIM_Y 4 //Functions'-kernels' Declarations __global__ void nextStateCalculation(int *Gptr,int *newMat, double * w , int n, int * flag); __device__ __forceinline__ void getTheSpin(int * Lat,int * newLat, double * weights , int n, int rowIndex, int colIndex, int * flag); ///Functions'-kernels' Definitions void ising( int *G, double *w, int k, int n){ //Flag for indicate if there was no changes in the lattice during a step,in order to terminate the evolving. int no_changes_flag; int * d_G, *d_secondG, *d_no_changes_flag; double * d_w; //Allocate memory for the no change flag in the Device if( cudaMalloc(&d_no_changes_flag, (size_t)sizeof(int)) != cudaSuccess){ printf("Couldn't allocate memory in device (GPU) !"); exit(1); } //Allocate memory and "transfer" the G Matrix in the Device if( cudaMalloc((void **)&d_G, (size_t)sizeof(int)*n*n) != cudaSuccess){ printf("Couldn't allocate memory in device (GPU) !"); exit(1); } cudaMemcpy(d_G, G, (size_t)sizeof(int)*n*n, cudaMemcpyHostToDevice); //Allocate memory and "transfer" the Weights' Matrix in the Device if( cudaMalloc((void **)&d_w, (size_t)sizeof(double)*5*5) != cudaSuccess){ printf("Couldn't allocate memory in device (GPU) !"); exit(1); } cudaMemcpy(d_w, w, (size_t)sizeof(double)*5*5, cudaMemcpyHostToDevice); //Allocate memory for the second G matrix only in GPU(device) if(cudaMalloc((void **)&d_secondG, (size_t)sizeof(int)*n*n) != cudaSuccess){ printf("Couldn't allocate memory in device (GPU) !"); exit(1); } //grid and block dimensions in order one thread to compute a set of moments. dim3 dimBlock(BLOCK_DIM_X,BLOCK_DIM_Y); dim3 dimGrid(GRID_DIM_X,GRID_DIM_Y); //Evolving the model for k steps for(int i=0 ; i<k ;i++){ /*no_changes_flag=1, indicates no change in the lattice, if there are changes nextStateCalculation() kernel will update its value.*/ no_changes_flag=1; cudaMemcpy(d_no_changes_flag, &no_changes_flag, (size_t)sizeof(int), cudaMemcpyHostToDevice); //calling the nextStateCalculation() kernel nextStateCalculation<<<dimGrid,dimBlock>>>(d_G,d_secondG,d_w,n,d_no_changes_flag); cudaDeviceSynchronize(); //Swapping the pointers between the two Matrices in device pointer_swap(&d_G,&d_secondG); //The host get the value of the no changes flag as indication if no changes happened during the step. cudaMemcpy(&no_changes_flag, d_no_changes_flag, (size_t)sizeof(int), cudaMemcpyDeviceToHost); //If there are no changes in the lattice we stop evolving the model if(no_changes_flag){ break; } } //Passing updated values of G matrix in the host(CPU). cudaMemcpy(G,d_G,(size_t)sizeof(int)*n*n,cudaMemcpyDeviceToHost); //Freeing memory space I dont need from GPU to avoid memory leaks. 
cudaFree(d_G);
  cudaFree(d_secondG);
  cudaFree(d_w);
}

__global__ void nextStateCalculation(int *Gptr, int *newMat, double *w, int n, int *flag){
  //The stride of each thread
  int strideX = blockDim.x * gridDim.x;
  int strideY = blockDim.y * gridDim.y;

  //The unique global indices of the threads in the grid
  int index_X = threadIdx.x + blockDim.x * blockIdx.x;
  int index_Y = threadIdx.y + blockDim.y * blockIdx.y;

  //Each thread loops in order to compute the spins of its own points
  for(int i = index_Y; i < n; i += strideY){
    for(int j = index_X; j < n; j += strideX){
      getTheSpin(Gptr, newMat, w, n, i, j, flag);
    }
  }
}

__device__ __forceinline__ void getTheSpin(int *Lat, int *newLat, double *weights, int n,
                                           int rowIndex, int colIndex, int *flag){
  double total = 0;
  int idxR, idxC;

  //Calculating the total influence for a certain spot
  for(int i = rowIndex - 2; i < rowIndex + 3; i++){
    for(int j = colIndex - 2; j < colIndex + 3; j++){
      if((i == rowIndex) && (j == colIndex))
        continue;

      //Using modular arithmetic to handle the boundary conditions
      //Getting the positive modulus
      idxR = (i + n) % n;
      idxC = (j + n) % n;

      //Total influence update
      total += Lat[idxR * n + idxC] * weights[(2 + i - rowIndex) * 5 + (2 + j - colIndex)];
    }
  }

  //Checking the conditions in order to get the next-state spin:
  //if (total == 0), taking possible floating point errors into account
  if((total < 1e-6) && (total > (-1e-6))){
    newLat[rowIndex * n + colIndex] = Lat[rowIndex * n + colIndex];
  }
  //If the spin at a certain spot changes, we set the no-change flag to 0.
  else if(total < 0){
    //Checking if there is a change in this certain spot
    if(Lat[rowIndex * n + colIndex] != -1)
      *flag = 0;
    newLat[rowIndex * n + colIndex] = -1;
  }
  else if(total > 0){
    //Checking if there is a change in this certain spot
    if(Lat[rowIndex * n + colIndex] != 1)
      *flag = 0;
    newLat[rowIndex * n + colIndex] = 1;
  }
}
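Both versions of ising() above swap d_G and d_secondG through pointer_swap() from essentials.h, which is not included in this pair. A minimal sketch of what such a helper presumably looks like (the name comes from the call site; the actual implementation in essentials.h is an assumption):

//Hypothetical sketch of the pointer_swap() helper used above: exchanging the
//two device pointers lets each step read the lattice the previous step wrote,
//without copying n*n ints between buffers.
void pointer_swap(int **a, int **b){
  int *tmp = *a;  //keep the first device pointer
  *a = *b;        //first handle now points to the freshly computed lattice
  *b = tmp;       //second handle becomes the scratch buffer for the next step
}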
9bdb701e9301e8831a1db865b4da60be2566f1d4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <limits>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/minmax.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <stdlib.h>

namespace raft {
namespace stats {

///@todo: need to add tests for verifying the column subsampling feature

template <typename T>
struct MinMaxInputs {
  T tolerance;
  int rows, cols;
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims)
{
  return os;
}

template <typename T>
__global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= ncols) return;
  globalmin[tid] = init_val;
  globalmax[tid] = -init_val;
}

template <typename T>
__global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int col = tid / nrows;
  if (col < ncols) {
    T val = data[tid];
    if (!isnan(val)) {
      raft::myAtomicMin(&globalmin[col], val);
      raft::myAtomicMax(&globalmax[col], val);
    }
  }
}

template <typename T>
void naiveMinMax(
  const T* data, int nrows, int ncols, T* globalmin, T* globalmax, hipStream_t stream)
{
  const int TPB = 128;
  int nblks  = raft::ceildiv(ncols, TPB);
  T init_val = std::numeric_limits<T>::max();
  hipLaunchKernelGGL((naiveMinMaxInitKernel), dim3(nblks), dim3(TPB), 0, stream,
    ncols, globalmin, globalmax, init_val);
  RAFT_CUDA_TRY(hipGetLastError());
  nblks = raft::ceildiv(nrows * ncols, TPB);
  hipLaunchKernelGGL((naiveMinMaxKernel), dim3(nblks), dim3(TPB), 0, stream,
    data, nrows, ncols, globalmin, globalmax);
  RAFT_CUDA_TRY(hipGetLastError());
}

template <typename T>
__global__ void nanKernel(T* data, const bool* mask, int len, T nan)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= len) return;
  if (!mask[tid]) data[tid] = nan;
}

template <typename T>
class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> {
 protected:
  MinMaxTest()
    : minmax_act(0, resource::get_cuda_stream(handle)),
      minmax_ref(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    auto stream = resource::get_cuda_stream(handle);
    params      = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len = params.rows * params.cols;
    rmm::device_uvector<T> data(len, stream);
    rmm::device_uvector<bool> mask(len, stream);
    minmax_act.resize(2 * params.cols, stream);
    minmax_ref.resize(2 * params.cols, stream);
    normal(handle, r, data.data(), len, (T)0.0, (T)1.0);
    T nan_prob = 0.01;
    bernoulli(handle, r, mask.data(), len, nan_prob);
    const int TPB = 256;
    hipLaunchKernelGGL((nanKernel), dim3(raft::ceildiv(len, TPB)), dim3(TPB), 0, stream,
      data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN());
    RAFT_CUDA_TRY(hipPeekAtLastError());
    naiveMinMax(data.data(),
                params.rows,
                params.cols,
                minmax_ref.data(),
                minmax_ref.data() + params.cols,
                stream);
    raft::stats::minmax<T, int>(
      handle,
      raft::make_device_matrix_view<const T, int, raft::layout_f_contiguous>(
        data.data(), params.rows, params.cols),
      std::nullopt,
      std::nullopt,
      raft::make_device_vector_view<T, int>(minmax_act.data(), params.cols),
      raft::make_device_vector_view<T, int>(minmax_act.data() + params.cols, params.cols),
      std::nullopt);
  }

 protected:
  raft::resources handle;
  MinMaxInputs<T> params;
  rmm::device_uvector<T> minmax_act;
  rmm::device_uvector<T> minmax_ref;
};

const std::vector<MinMaxInputs<float>> inputsf = {
  {0.00001f, 1024, 32, 1234ULL},  {0.00001f, 1024, 64, 1234ULL},
  {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL},
  {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL},
  {0.00001f, 4096, 32, 1234ULL},  {0.00001f, 4096, 64, 1234ULL},
  {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL},
  {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL},
  {0.00001f, 8192, 32, 1234ULL},  {0.00001f, 8192, 64, 1234ULL},
  {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL},
  {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL},
  {0.00001f, 1024, 8192, 1234ULL}};

const std::vector<MinMaxInputs<double>> inputsd = {
  {0.0000001, 1024, 32, 1234ULL},  {0.0000001, 1024, 64, 1234ULL},
  {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL},
  {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL},
  {0.0000001, 4096, 32, 1234ULL},  {0.0000001, 4096, 64, 1234ULL},
  {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL},
  {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL},
  {0.0000001, 8192, 32, 1234ULL},  {0.0000001, 8192, 64, 1234ULL},
  {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL},
  {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL},
  {0.0000001, 1024, 8192, 1234ULL}};

typedef MinMaxTest<float> MinMaxTestF;
TEST_P(MinMaxTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef MinMaxTest<double> MinMaxTestD;
TEST_P(MinMaxTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
9bdb701e9301e8831a1db865b4da60be2566f1d4.cu
/*
 * Copyright (c) 2019-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "../test_utils.cuh"

#include <gtest/gtest.h>
#include <limits>
#include <raft/core/device_mdspan.hpp>
#include <raft/core/resource/cuda_stream.hpp>
#include <raft/core/resources.hpp>
#include <raft/random/rng.cuh>
#include <raft/stats/minmax.cuh>
#include <raft/util/cuda_utils.cuh>
#include <raft/util/cudart_utils.hpp>
#include <stdio.h>
#include <stdlib.h>

namespace raft {
namespace stats {

///@todo: need to add tests for verifying the column subsampling feature

template <typename T>
struct MinMaxInputs {
  T tolerance;
  int rows, cols;
  unsigned long long int seed;
};

template <typename T>
::std::ostream& operator<<(::std::ostream& os, const MinMaxInputs<T>& dims)
{
  return os;
}

template <typename T>
__global__ void naiveMinMaxInitKernel(int ncols, T* globalmin, T* globalmax, T init_val)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= ncols) return;
  globalmin[tid] = init_val;
  globalmax[tid] = -init_val;
}

template <typename T>
__global__ void naiveMinMaxKernel(const T* data, int nrows, int ncols, T* globalmin, T* globalmax)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  int col = tid / nrows;
  if (col < ncols) {
    T val = data[tid];
    if (!isnan(val)) {
      raft::myAtomicMin(&globalmin[col], val);
      raft::myAtomicMax(&globalmax[col], val);
    }
  }
}

template <typename T>
void naiveMinMax(
  const T* data, int nrows, int ncols, T* globalmin, T* globalmax, cudaStream_t stream)
{
  const int TPB = 128;
  int nblks  = raft::ceildiv(ncols, TPB);
  T init_val = std::numeric_limits<T>::max();
  naiveMinMaxInitKernel<<<nblks, TPB, 0, stream>>>(ncols, globalmin, globalmax, init_val);
  RAFT_CUDA_TRY(cudaGetLastError());
  nblks = raft::ceildiv(nrows * ncols, TPB);
  naiveMinMaxKernel<<<nblks, TPB, 0, stream>>>(data, nrows, ncols, globalmin, globalmax);
  RAFT_CUDA_TRY(cudaGetLastError());
}

template <typename T>
__global__ void nanKernel(T* data, const bool* mask, int len, T nan)
{
  int tid = threadIdx.x + blockIdx.x * blockDim.x;
  if (tid >= len) return;
  if (!mask[tid]) data[tid] = nan;
}

template <typename T>
class MinMaxTest : public ::testing::TestWithParam<MinMaxInputs<T>> {
 protected:
  MinMaxTest()
    : minmax_act(0, resource::get_cuda_stream(handle)),
      minmax_ref(0, resource::get_cuda_stream(handle))
  {
  }

  void SetUp() override
  {
    auto stream = resource::get_cuda_stream(handle);
    params      = ::testing::TestWithParam<MinMaxInputs<T>>::GetParam();
    raft::random::RngState r(params.seed);
    int len = params.rows * params.cols;
    rmm::device_uvector<T> data(len, stream);
    rmm::device_uvector<bool> mask(len, stream);
    minmax_act.resize(2 * params.cols, stream);
    minmax_ref.resize(2 * params.cols, stream);
    normal(handle, r, data.data(), len, (T)0.0, (T)1.0);
    T nan_prob = 0.01;
    bernoulli(handle, r, mask.data(), len, nan_prob);
    const int TPB = 256;
    nanKernel<<<raft::ceildiv(len, TPB), TPB, 0, stream>>>(
      data.data(), mask.data(), len, std::numeric_limits<T>::quiet_NaN());
    RAFT_CUDA_TRY(cudaPeekAtLastError());
    naiveMinMax(data.data(),
                params.rows,
                params.cols,
                minmax_ref.data(),
                minmax_ref.data() + params.cols,
                stream);
    raft::stats::minmax<T, int>(
      handle,
      raft::make_device_matrix_view<const T, int, raft::layout_f_contiguous>(
        data.data(), params.rows, params.cols),
      std::nullopt,
      std::nullopt,
      raft::make_device_vector_view<T, int>(minmax_act.data(), params.cols),
      raft::make_device_vector_view<T, int>(minmax_act.data() + params.cols, params.cols),
      std::nullopt);
  }

 protected:
  raft::resources handle;
  MinMaxInputs<T> params;
  rmm::device_uvector<T> minmax_act;
  rmm::device_uvector<T> minmax_ref;
};

const std::vector<MinMaxInputs<float>> inputsf = {
  {0.00001f, 1024, 32, 1234ULL},  {0.00001f, 1024, 64, 1234ULL},
  {0.00001f, 1024, 128, 1234ULL}, {0.00001f, 1024, 256, 1234ULL},
  {0.00001f, 1024, 512, 1234ULL}, {0.00001f, 1024, 1024, 1234ULL},
  {0.00001f, 4096, 32, 1234ULL},  {0.00001f, 4096, 64, 1234ULL},
  {0.00001f, 4096, 128, 1234ULL}, {0.00001f, 4096, 256, 1234ULL},
  {0.00001f, 4096, 512, 1234ULL}, {0.00001f, 4096, 1024, 1234ULL},
  {0.00001f, 8192, 32, 1234ULL},  {0.00001f, 8192, 64, 1234ULL},
  {0.00001f, 8192, 128, 1234ULL}, {0.00001f, 8192, 256, 1234ULL},
  {0.00001f, 8192, 512, 1234ULL}, {0.00001f, 8192, 1024, 1234ULL},
  {0.00001f, 1024, 8192, 1234ULL}};

const std::vector<MinMaxInputs<double>> inputsd = {
  {0.0000001, 1024, 32, 1234ULL},  {0.0000001, 1024, 64, 1234ULL},
  {0.0000001, 1024, 128, 1234ULL}, {0.0000001, 1024, 256, 1234ULL},
  {0.0000001, 1024, 512, 1234ULL}, {0.0000001, 1024, 1024, 1234ULL},
  {0.0000001, 4096, 32, 1234ULL},  {0.0000001, 4096, 64, 1234ULL},
  {0.0000001, 4096, 128, 1234ULL}, {0.0000001, 4096, 256, 1234ULL},
  {0.0000001, 4096, 512, 1234ULL}, {0.0000001, 4096, 1024, 1234ULL},
  {0.0000001, 8192, 32, 1234ULL},  {0.0000001, 8192, 64, 1234ULL},
  {0.0000001, 8192, 128, 1234ULL}, {0.0000001, 8192, 256, 1234ULL},
  {0.0000001, 8192, 512, 1234ULL}, {0.0000001, 8192, 1024, 1234ULL},
  {0.0000001, 1024, 8192, 1234ULL}};

typedef MinMaxTest<float> MinMaxTestF;
TEST_P(MinMaxTestF, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<float>(params.tolerance)));
}

typedef MinMaxTest<double> MinMaxTestD;
TEST_P(MinMaxTestD, Result)
{
  ASSERT_TRUE(raft::devArrMatch(minmax_ref.data(),
                                minmax_act.data(),
                                2 * params.cols,
                                raft::CompareApprox<double>(params.tolerance)));
}

INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MinMaxTests, MinMaxTestD, ::testing::ValuesIn(inputsd));

}  // end namespace stats
}  // end namespace raft
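naiveMinMaxKernel above relies on raft::myAtomicMin and raft::myAtomicMax from raft/util/cuda_utils.cuh, whose definitions are not part of this pair. CUDA has no native atomicMin/atomicMax for floating-point types, so such helpers are conventionally built from an atomicCAS loop over the value's bit pattern; the sketch below shows that standard technique for float (raft's actual implementation may differ):

// Hedged sketch: atomic minimum for float built on atomicCAS over the raw bits.
// The CAS loop retries until this thread either observes a value already <= val
// or successfully installs val.
__device__ float atomicMinFloat(float* addr, float val)
{
  unsigned int* addr_as_uint = reinterpret_cast<unsigned int*>(addr);
  unsigned int old = *addr_as_uint, assumed;
  do {
    assumed = old;
    if (__uint_as_float(assumed) <= val) break;  // already small enough
    old = atomicCAS(addr_as_uint, assumed, __float_as_uint(val));
  } while (old != assumed);
  return __uint_as_float(old);
}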
3b415f3dfe8a01c8ec5516a8e9ff3a13d32155b9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*! * Copyright 2017-2020 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <memory> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr) : d_data{data}, d_row_ptr{row_ptr} {} }; struct SparsePageLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; bst_feature_t num_features; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(data.d_row_ptr), d_data(data.d_data), num_features(num_features), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) const { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetFvalue(int ridx, int fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; struct CuPyAdapterLoader { data::CupyAdapterBatch batch; bst_feature_t columns; float* smem; bool use_shared; DEV_INLINE CuPyAdapterLoader(data::CupyAdapterBatch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; struct CuDFAdapterLoader { data::CudfAdapterBatch batch; bst_feature_t columns; float* smem; bool use_shared; DEV_INLINE CuDFAdapterLoader(data::CudfAdapterBatch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { for (size_t i = 0; i < columns; ++i) { smem[threadIdx.x * columns + i] = batch.GetValue(global_idx, i); } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetValue(ridx, fidx); } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; 
d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: dh::device_vector<RegTree::Node> nodes; dh::device_vector<size_t> tree_segments; dh::device_vector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void CopyModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<RegTree::Node>& h_nodes, size_t tree_begin, size_t tree_end) { nodes.resize(h_nodes.size()); dh::safe_cuda(hipMemcpyAsync(nodes.data().get(), h_nodes.data(), sizeof(RegTree::Node) * h_nodes.size(), hipMemcpyHostToDevice)); tree_segments.resize(h_tree_segments.size()); dh::safe_cuda(hipMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), hipMemcpyHostToDevice)); tree_group.resize(model.tree_info.size()); dh::safe_cuda(hipMemcpyAsync(tree_group.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), hipMemcpyHostToDevice)); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(hipSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()}; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( PredictKernel<EllpackLoader, 
EllpackDeviceAccessor>, batch, dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<EllpackPage>()) { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } else { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.learner_model_param->num_output_group; } } } public: explicit GPUPredictor(GenericParameter const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(hipSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader, typename Batch> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; DeviceModel d_model; d_model.Init(model, 
tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<Adapter>(x); CHECK_EQ(m.NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m.DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m.DeviceIdx(); MetaInfo info; info.num_col_ = m.NumColumns(); info.num_row_ = m.NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m.NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, Batch>, m.Value(), dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(), dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group), tree_begin, tree_end, m.NumColumns(), info.num_row_, entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(data::CupyAdapter)) { this->DispatchedInplacePredict<data::CupyAdapter, CuPyAdapterLoader, data::CupyAdapterBatch>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(data::CudfAdapter)) { this->DispatchedInplacePredict<data::CudfAdapter, CuDFAdapterLoader, data::CudfAdapterBatch>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void Configure(const 
std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
3b415f3dfe8a01c8ec5516a8e9ff3a13d32155b9.cu
/*! * Copyright 2017-2020 by Contributors */ #include <thrust/copy.h> #include <thrust/device_ptr.h> #include <thrust/device_vector.h> #include <thrust/fill.h> #include <memory> #include "xgboost/data.h" #include "xgboost/predictor.h" #include "xgboost/tree_model.h" #include "xgboost/tree_updater.h" #include "xgboost/host_device_vector.h" #include "../gbm/gbtree_model.h" #include "../data/ellpack_page.cuh" #include "../data/device_adapter.cuh" #include "../common/common.h" #include "../common/device_helpers.cuh" namespace xgboost { namespace predictor { DMLC_REGISTRY_FILE_TAG(gpu_predictor); struct SparsePageView { common::Span<const Entry> d_data; common::Span<const bst_row_t> d_row_ptr; XGBOOST_DEVICE SparsePageView(common::Span<const Entry> data, common::Span<const bst_row_t> row_ptr) : d_data{data}, d_row_ptr{row_ptr} {} }; struct SparsePageLoader { bool use_shared; common::Span<const bst_row_t> d_row_ptr; common::Span<const Entry> d_data; bst_feature_t num_features; float* smem; size_t entry_start; __device__ SparsePageLoader(SparsePageView data, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : use_shared(use_shared), d_row_ptr(data.d_row_ptr), d_data(data.d_data), num_features(num_features), entry_start(entry_start) { extern __shared__ float _smem[]; smem = _smem; // Copy instances if (use_shared) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; int shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { bst_uint elem_begin = d_row_ptr[global_idx]; bst_uint elem_end = d_row_ptr[global_idx + 1]; for (bst_uint elem_idx = elem_begin; elem_idx < elem_end; elem_idx++) { Entry elem = d_data[elem_idx - entry_start]; smem[threadIdx.x * num_features + elem.index] = elem.fvalue; } } __syncthreads(); } } __device__ float GetFvalue(int ridx, int fidx) const { if (use_shared) { return smem[threadIdx.x * num_features + fidx]; } else { // Binary search auto begin_ptr = d_data.begin() + (d_row_ptr[ridx] - entry_start); auto end_ptr = d_data.begin() + (d_row_ptr[ridx + 1] - entry_start); common::Span<const Entry>::iterator previous_middle; while (end_ptr != begin_ptr) { auto middle = begin_ptr + (end_ptr - begin_ptr) / 2; if (middle == previous_middle) { break; } else { previous_middle = middle; } if (middle->index == fidx) { return middle->fvalue; } else if (middle->index < fidx) { begin_ptr = middle; } else { end_ptr = middle; } } // Value is missing return nanf(""); } } }; struct EllpackLoader { EllpackDeviceAccessor const& matrix; XGBOOST_DEVICE EllpackLoader(EllpackDeviceAccessor const& m, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : matrix{m} {} __device__ __forceinline__ float GetFvalue(int ridx, int fidx) const { auto gidx = matrix.GetBinIndex(ridx, fidx); if (gidx == -1) { return nan(""); } // The gradient index needs to be shifted by one as min values are not included in the // cuts. 
if (gidx == matrix.feature_segments[fidx]) { return matrix.min_fvalue[fidx]; } return matrix.gidx_fvalue_map[gidx - 1]; } }; struct CuPyAdapterLoader { data::CupyAdapterBatch batch; bst_feature_t columns; float* smem; bool use_shared; DEV_INLINE CuPyAdapterLoader(data::CupyAdapterBatch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { auto beg = global_idx * columns; auto end = (global_idx + 1) * columns; for (size_t i = beg; i < end; ++i) { smem[threadIdx.x * num_features + (i - beg)] = batch.GetElement(i).value; } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetElement(ridx * columns + fidx).value; } }; struct CuDFAdapterLoader { data::CudfAdapterBatch batch; bst_feature_t columns; float* smem; bool use_shared; DEV_INLINE CuDFAdapterLoader(data::CudfAdapterBatch const batch, bool use_shared, bst_feature_t num_features, bst_row_t num_rows, size_t entry_start) : batch{batch}, columns{num_features}, use_shared{use_shared} { extern __shared__ float _smem[]; smem = _smem; if (use_shared) { uint32_t global_idx = blockDim.x * blockIdx.x + threadIdx.x; size_t shared_elements = blockDim.x * num_features; dh::BlockFill(smem, shared_elements, nanf("")); __syncthreads(); if (global_idx < num_rows) { for (size_t i = 0; i < columns; ++i) { smem[threadIdx.x * columns + i] = batch.GetValue(global_idx, i); } } } __syncthreads(); } DEV_INLINE float GetFvalue(bst_row_t ridx, bst_feature_t fidx) const { if (use_shared) { return smem[threadIdx.x * columns + fidx]; } return batch.GetValue(ridx, fidx); } }; template <typename Loader> __device__ float GetLeafWeight(bst_uint ridx, const RegTree::Node* tree, Loader* loader) { RegTree::Node n = tree[0]; while (!n.IsLeaf()) { float fvalue = loader->GetFvalue(ridx, n.SplitIndex()); // Missing value if (isnan(fvalue)) { n = tree[n.DefaultChild()]; } else { if (fvalue < n.SplitCond()) { n = tree[n.LeftChild()]; } else { n = tree[n.RightChild()]; } } } return n.LeafValue(); } template <typename Loader, typename Data> __global__ void PredictKernel(Data data, common::Span<const RegTree::Node> d_nodes, common::Span<float> d_out_predictions, common::Span<size_t> d_tree_segments, common::Span<int> d_tree_group, size_t tree_begin, size_t tree_end, size_t num_features, size_t num_rows, size_t entry_start, bool use_shared, int num_group) { bst_uint global_idx = blockDim.x * blockIdx.x + threadIdx.x; Loader loader(data, use_shared, num_features, num_rows, entry_start); if (global_idx >= num_rows) return; if (num_group == 1) { float sum = 0; for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; float leaf = GetLeafWeight(global_idx, d_tree, &loader); sum += leaf; } d_out_predictions[global_idx] += sum; } else { for (int tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { int tree_group = d_tree_group[tree_idx]; const RegTree::Node* d_tree = &d_nodes[d_tree_segments[tree_idx - tree_begin]]; bst_uint out_prediction_idx = global_idx * num_group + tree_group; 
d_out_predictions[out_prediction_idx] += GetLeafWeight(global_idx, d_tree, &loader); } } } class DeviceModel { public: dh::device_vector<RegTree::Node> nodes; dh::device_vector<size_t> tree_segments; dh::device_vector<int> tree_group; size_t tree_beg_; // NOLINT size_t tree_end_; // NOLINT int num_group; void CopyModel(const gbm::GBTreeModel& model, const thrust::host_vector<size_t>& h_tree_segments, const thrust::host_vector<RegTree::Node>& h_nodes, size_t tree_begin, size_t tree_end) { nodes.resize(h_nodes.size()); dh::safe_cuda(cudaMemcpyAsync(nodes.data().get(), h_nodes.data(), sizeof(RegTree::Node) * h_nodes.size(), cudaMemcpyHostToDevice)); tree_segments.resize(h_tree_segments.size()); dh::safe_cuda(cudaMemcpyAsync(tree_segments.data().get(), h_tree_segments.data(), sizeof(size_t) * h_tree_segments.size(), cudaMemcpyHostToDevice)); tree_group.resize(model.tree_info.size()); dh::safe_cuda(cudaMemcpyAsync(tree_group.data().get(), model.tree_info.data(), sizeof(int) * model.tree_info.size(), cudaMemcpyHostToDevice)); this->tree_beg_ = tree_begin; this->tree_end_ = tree_end; this->num_group = model.learner_model_param->num_output_group; } void Init(const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end, int32_t gpu_id) { dh::safe_cuda(cudaSetDevice(gpu_id)); CHECK_EQ(model.param.size_leaf_vector, 0); // Copy decision trees to device thrust::host_vector<size_t> h_tree_segments{}; h_tree_segments.reserve((tree_end - tree_begin) + 1); size_t sum = 0; h_tree_segments.push_back(sum); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { sum += model.trees.at(tree_idx)->GetNodes().size(); h_tree_segments.push_back(sum); } thrust::host_vector<RegTree::Node> h_nodes(h_tree_segments.back()); for (auto tree_idx = tree_begin; tree_idx < tree_end; tree_idx++) { auto& src_nodes = model.trees.at(tree_idx)->GetNodes(); std::copy(src_nodes.begin(), src_nodes.end(), h_nodes.begin() + h_tree_segments[tree_idx - tree_begin]); } CopyModel(model, h_tree_segments, h_nodes, tree_begin, tree_end); } }; class GPUPredictor : public xgboost::Predictor { private: void PredictInternal(const SparsePage& batch, size_t num_features, HostDeviceVector<bst_float>* predictions, size_t batch_offset) { batch.offset.SetDevice(generic_param_->gpu_id); batch.data.SetDevice(generic_param_->gpu_id); const uint32_t BLOCK_THREADS = 128; size_t num_rows = batch.Size(); auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * num_features * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes_) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; SparsePageView data{batch.data.DeviceSpan(), batch.offset.DeviceSpan()}; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<SparsePageLoader, SparsePageView>, data, dh::ToSpan(model_.nodes), predictions->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, num_features, num_rows, entry_start, use_shared, model_.num_group); } void PredictInternal(EllpackDeviceAccessor const& batch, HostDeviceVector<bst_float>* out_preds, size_t batch_offset) { const uint32_t BLOCK_THREADS = 256; size_t num_rows = batch.n_rows; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(num_rows, BLOCK_THREADS)); bool use_shared = false; size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS} ( 
PredictKernel<EllpackLoader, EllpackDeviceAccessor>, batch, dh::ToSpan(model_.nodes), out_preds->DeviceSpan().subspan(batch_offset), dh::ToSpan(model_.tree_segments), dh::ToSpan(model_.tree_group), model_.tree_beg_, model_.tree_end_, batch.NumFeatures(), num_rows, entry_start, use_shared, model_.num_group); } void DevicePredictInternal(DMatrix* dmat, HostDeviceVector<float>* out_preds, const gbm::GBTreeModel& model, size_t tree_begin, size_t tree_end) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); if (tree_end - tree_begin == 0) { return; } model_.Init(model, tree_begin, tree_end, generic_param_->gpu_id); out_preds->SetDevice(generic_param_->gpu_id); if (dmat->PageExists<EllpackPage>()) { size_t batch_offset = 0; for (auto const& page : dmat->GetBatches<EllpackPage>()) { this->PredictInternal( page.Impl()->GetDeviceAccessor(generic_param_->gpu_id), out_preds, batch_offset); batch_offset += page.Impl()->n_rows; } } else { size_t batch_offset = 0; for (auto &batch : dmat->GetBatches<SparsePage>()) { this->PredictInternal(batch, model.learner_model_param->num_feature, out_preds, batch_offset); batch_offset += batch.Size() * model.learner_model_param->num_output_group; } } } public: explicit GPUPredictor(GenericParameter const* generic_param) : Predictor::Predictor{generic_param} {} ~GPUPredictor() override { if (generic_param_->gpu_id >= 0) { dh::safe_cuda(cudaSetDevice(generic_param_->gpu_id)); } } void PredictBatch(DMatrix* dmat, PredictionCacheEntry* predts, const gbm::GBTreeModel& model, int tree_begin, unsigned ntree_limit = 0) override { // This function is duplicated with CPU predictor PredictBatch, see comments in there. // FIXME(trivialfis): Remove the duplication. std::lock_guard<std::mutex> const guard(lock_); int device = generic_param_->gpu_id; CHECK_GE(device, 0) << "Set `gpu_id' to positive value for processing GPU data."; ConfigureDevice(device); CHECK_EQ(tree_begin, 0); auto* out_preds = &predts->predictions; CHECK_GE(predts->version, tree_begin); if (out_preds->Size() == 0 && dmat->Info().num_row_ != 0) { CHECK_EQ(predts->version, 0); } if (predts->version == 0) { this->InitOutPredictions(dmat->Info(), out_preds, model); } uint32_t const output_groups = model.learner_model_param->num_output_group; CHECK_NE(output_groups, 0); uint32_t real_ntree_limit = ntree_limit * output_groups; if (real_ntree_limit == 0 || real_ntree_limit > model.trees.size()) { real_ntree_limit = static_cast<uint32_t>(model.trees.size()); } uint32_t const end_version = (tree_begin + real_ntree_limit) / output_groups; if (predts->version > end_version) { CHECK_NE(ntree_limit, 0); this->InitOutPredictions(dmat->Info(), out_preds, model); predts->version = 0; } uint32_t const beg_version = predts->version; CHECK_LE(beg_version, end_version); if (beg_version < end_version) { this->DevicePredictInternal(dmat, out_preds, model, beg_version * output_groups, end_version * output_groups); } uint32_t delta = end_version - beg_version; CHECK_LE(delta, model.trees.size()); predts->Update(delta); CHECK(out_preds->Size() == output_groups * dmat->Info().num_row_ || out_preds->Size() == dmat->Info().num_row_); } template <typename Adapter, typename Loader, typename Batch> void DispatchedInplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, uint32_t tree_end) const { auto max_shared_memory_bytes = dh::MaxSharedMemory(this->generic_param_->gpu_id); uint32_t const output_groups = model.learner_model_param->num_output_group; 
DeviceModel d_model; d_model.Init(model, tree_begin, tree_end, this->generic_param_->gpu_id); auto m = dmlc::get<Adapter>(x); CHECK_EQ(m.NumColumns(), model.learner_model_param->num_feature) << "Number of columns in data must equal to trained model."; CHECK_EQ(this->generic_param_->gpu_id, m.DeviceIdx()) << "XGBoost is running on device: " << this->generic_param_->gpu_id << ", " << "but data is on: " << m.DeviceIdx(); MetaInfo info; info.num_col_ = m.NumColumns(); info.num_row_ = m.NumRows(); this->InitOutPredictions(info, &(out_preds->predictions), model); const uint32_t BLOCK_THREADS = 128; auto GRID_SIZE = static_cast<uint32_t>(common::DivRoundUp(info.num_row_, BLOCK_THREADS)); auto shared_memory_bytes = static_cast<size_t>(sizeof(float) * m.NumColumns() * BLOCK_THREADS); bool use_shared = true; if (shared_memory_bytes > max_shared_memory_bytes) { shared_memory_bytes = 0; use_shared = false; } size_t entry_start = 0; dh::LaunchKernel {GRID_SIZE, BLOCK_THREADS, shared_memory_bytes} ( PredictKernel<Loader, Batch>, m.Value(), dh::ToSpan(d_model.nodes), out_preds->predictions.DeviceSpan(), dh::ToSpan(d_model.tree_segments), dh::ToSpan(d_model.tree_group), tree_begin, tree_end, m.NumColumns(), info.num_row_, entry_start, use_shared, output_groups); } void InplacePredict(dmlc::any const &x, const gbm::GBTreeModel &model, float missing, PredictionCacheEntry *out_preds, uint32_t tree_begin, unsigned tree_end) const override { if (x.type() == typeid(data::CupyAdapter)) { this->DispatchedInplacePredict<data::CupyAdapter, CuPyAdapterLoader, data::CupyAdapterBatch>( x, model, missing, out_preds, tree_begin, tree_end); } else if (x.type() == typeid(data::CudfAdapter)) { this->DispatchedInplacePredict<data::CudfAdapter, CuDFAdapterLoader, data::CudfAdapterBatch>( x, model, missing, out_preds, tree_begin, tree_end); } else { LOG(FATAL) << "Only CuPy and CuDF are supported by GPU Predictor."; } } protected: void InitOutPredictions(const MetaInfo& info, HostDeviceVector<bst_float>* out_preds, const gbm::GBTreeModel& model) const { size_t n_classes = model.learner_model_param->num_output_group; size_t n = n_classes * info.num_row_; const HostDeviceVector<bst_float>& base_margin = info.base_margin_; out_preds->SetDevice(generic_param_->gpu_id); out_preds->Resize(n); if (base_margin.Size() != 0) { CHECK_EQ(base_margin.Size(), n); out_preds->Copy(base_margin); } else { out_preds->Fill(model.learner_model_param->base_score); } } void PredictInstance(const SparsePage::Inst& inst, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictLeaf(DMatrix* p_fmat, std::vector<bst_float>* out_preds, const gbm::GBTreeModel& model, unsigned ntree_limit) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictContribution(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate, int condition, unsigned condition_feature) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU Predictor."; } void PredictInteractionContributions(DMatrix* p_fmat, std::vector<bst_float>* out_contribs, const gbm::GBTreeModel& model, unsigned ntree_limit, std::vector<bst_float>* tree_weights, bool approximate) override { LOG(FATAL) << "[Internal error]: " << __func__ << " is not implemented in GPU 
Predictor."; } void Configure(const std::vector<std::pair<std::string, std::string>>& cfg) override { Predictor::Configure(cfg); } private: /*! \brief Reconfigure the device when GPU is changed. */ void ConfigureDevice(int device) { if (device >= 0) { max_shared_memory_bytes_ = dh::MaxSharedMemory(device); } } std::mutex lock_; DeviceModel model_; size_t max_shared_memory_bytes_; }; XGBOOST_REGISTER_PREDICTOR(GPUPredictor, "gpu_predictor") .describe("Make predictions using GPU.") .set_body([](GenericParameter const* generic_param) { return new GPUPredictor(generic_param); }); } // namespace predictor } // namespace xgboost
9c7bd1dd5d746d39416cf748a672088086a71fbd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "box3d4r-32x32-1-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 4 - 4); const AN5D_TYPE __c1Pad = (4); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 4 - 4); const AN5D_TYPE __c2Pad = (4); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 4 - 4); const AN5D_TYPE __c3Pad = (4); #define __c3 c3 const AN5D_TYPE __halo1 = 4; const AN5D_TYPE __halo2 = 4; const AN5D_TYPE __halo3 = 4; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_1_5; double __reg_1_6; double __reg_1_7; double __reg_1_8; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = 
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.240f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -4, -4)))) + (0.0020f * (__SBREF(__a_sb, -4, -3)))) + (0.0030f * (__SBREF(__a_sb, -4, -2)))) + (0.0040f * (__SBREF(__a_sb, -4, -1)))) + (0.0050f * (__SBREF(__a_sb, -4, 0)))) + (0.0060f * (__SBREF(__a_sb, -4, 1)))) + (0.0070f * (__SBREF(__a_sb, -4, 2)))) + (0.0080f * (__SBREF(__a_sb, -4, 3)))) + (0.0090f * (__SBREF(__a_sb, -4, 4)))) + (0.0100f * (__SBREF(__a_sb, -3, -4)))) + (0.0110f * (__SBREF(__a_sb, -3, -3)))) + (0.0120f * (__SBREF(__a_sb, -3, -2)))) + (0.0130f * (__SBREF(__a_sb, -3, -1)))) + (0.0140f * (__SBREF(__a_sb, -3, 0)))) + (0.0150f * (__SBREF(__a_sb, -3, 1)))) + (0.0160f * (__SBREF(__a_sb, -3, 2)))) + (0.0170f * (__SBREF(__a_sb, -3, 3)))) + (0.0180f * (__SBREF(__a_sb, -3, 4)))) + (0.0190f * (__SBREF(__a_sb, -2, -4)))) + (0.0200f * (__SBREF(__a_sb, -2, -3)))) + (0.0210f * (__SBREF(__a_sb, -2, -2)))) + (0.0220f * (__SBREF(__a_sb, -2, -1)))) + (0.0230f * (__SBREF(__a_sb, -2, 0)))) + (0.0240f * (__SBREF(__a_sb, -2, 1)))) + (0.0250f * (__SBREF(__a_sb, -2, 2)))) + (0.0260f * (__SBREF(__a_sb, -2, 3)))) + (0.0270f * (__SBREF(__a_sb, -2, 4)))) + (0.0280f * (__SBREF(__a_sb, -1, -4)))) + (0.0290f * (__SBREF(__a_sb, -1, -3)))) + (0.0300f * (__SBREF(__a_sb, -1, -2)))) + (0.0310f * (__SBREF(__a_sb, -1, -1)))) + (0.0320f * (__SBREF(__a_sb, -1, 0)))) + (0.0330f * (__SBREF(__a_sb, -1, 1)))) + (0.0340f * (__SBREF(__a_sb, -1, 2)))) + (0.0350f * (__SBREF(__a_sb, -1, 3)))) + (0.0360f * (__SBREF(__a_sb, -1, 4)))) + (0.0370f * (__SBREF(__a_sb, 0, -4)))) + (0.0380f * (__SBREF(__a_sb, 0, -3)))) + (0.0390f * (__SBREF(__a_sb, 0, -2)))) + (0.0400f * (__SBREF(__a_sb, 0, -1)))) + (0.0410f * (__SBREF(__a_sb, 0, 1)))) + (0.0420f * (__SBREF(__a_sb, 0, 2)))) + (0.0430f * (__SBREF(__a_sb, 0, 3)))) + (0.0440f * (__SBREF(__a_sb, 0, 4)))) + (0.0450f * (__SBREF(__a_sb, 1, -4)))) + (0.0460f * (__SBREF(__a_sb, 1, -3)))) + (0.0470f * (__SBREF(__a_sb, 1, -2)))) + (0.0480f * (__SBREF(__a_sb, 1, -1)))) + (0.0490f * (__SBREF(__a_sb, 1, 0)))) + (0.0500f * (__SBREF(__a_sb, 1, 1)))) + (0.0510f * (__SBREF(__a_sb, 1, 2)))) + (0.0520f * (__SBREF(__a_sb, 1, 3)))) + (0.0530f * (__SBREF(__a_sb, 1, 4)))) + (0.0540f * (__SBREF(__a_sb, 2, -4)))) + (0.0550f * (__SBREF(__a_sb, 2, -3)))) + (0.0560f * (__SBREF(__a_sb, 2, -2)))) + (0.0570f * (__SBREF(__a_sb, 2, -1)))) + (0.0580f * (__SBREF(__a_sb, 2, 0)))) + (0.0590f * (__SBREF(__a_sb, 2, 1)))) + (0.0600f * (__SBREF(__a_sb, 2, 2)))) + (0.0610f * (__SBREF(__a_sb, 2, 3)))) + (0.0620f * (__SBREF(__a_sb, 2, 4)))) + (0.0630f * (__SBREF(__a_sb, 3, -4)))) + (0.0640f * (__SBREF(__a_sb, 3, -3)))) + (0.0650f * (__SBREF(__a_sb, 3, -2)))) + (0.0660f * (__SBREF(__a_sb, 3, -1)))) + (0.0670f * (__SBREF(__a_sb, 3, 0)))) + (0.0680f * (__SBREF(__a_sb, 3, 1)))) + (0.0690f * (__SBREF(__a_sb, 3, 2)))) + (0.0700f * (__SBREF(__a_sb, 
3, 3)))) + (0.0710f * (__SBREF(__a_sb, 3, 4)))) + (0.0720f * (__SBREF(__a_sb, 4, -4)))) + (0.0730f * (__SBREF(__a_sb, 4, -3)))) + (0.0740f * (__SBREF(__a_sb, 4, -2)))) + (0.0750f * (__SBREF(__a_sb, 4, -1)))) + (0.0760f * (__SBREF(__a_sb, 4, 0)))) + (0.0770f * (__SBREF(__a_sb, 4, 1)))) + (0.0780f * (__SBREF(__a_sb, 4, 2)))) + (0.0790f * (__SBREF(__a_sb, 4, 3)))) + (0.0800f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.248f) * (__REGREF(__a, 0, 0)))) + (0.0011f * (__SBREF(__a_sb, -4, -4)))) + (0.0021f * (__SBREF(__a_sb, -4, -3)))) + (0.0031f * (__SBREF(__a_sb, -4, -2)))) + (0.0041f * (__SBREF(__a_sb, -4, -1)))) + (0.0051f * (__SBREF(__a_sb, -4, 0)))) + (0.0061f * (__SBREF(__a_sb, -4, 1)))) + (0.0071f * (__SBREF(__a_sb, -4, 2)))) + (0.0081f * (__SBREF(__a_sb, -4, 3)))) + (0.0091f * (__SBREF(__a_sb, -4, 4)))) + (0.0101f * (__SBREF(__a_sb, -3, -4)))) + (0.0111f * (__SBREF(__a_sb, -3, -3)))) + (0.0121f * (__SBREF(__a_sb, -3, -2)))) + (0.0131f * (__SBREF(__a_sb, -3, -1)))) + (0.0141f * (__SBREF(__a_sb, -3, 0)))) + (0.0151f * (__SBREF(__a_sb, -3, 1)))) + (0.0161f * (__SBREF(__a_sb, -3, 2)))) + (0.0171f * (__SBREF(__a_sb, -3, 3)))) + (0.0181f * (__SBREF(__a_sb, -3, 4)))) + (0.0191f * (__SBREF(__a_sb, -2, -4)))) + (0.0201f * (__SBREF(__a_sb, -2, -3)))) + (0.0211f * (__SBREF(__a_sb, -2, -2)))) + (0.0221f * (__SBREF(__a_sb, -2, -1)))) + (0.0231f * (__SBREF(__a_sb, -2, 0)))) + (0.0241f * (__SBREF(__a_sb, -2, 1)))) + (0.0251f * (__SBREF(__a_sb, -2, 2)))) + (0.0261f * (__SBREF(__a_sb, -2, 3)))) + (0.0271f * (__SBREF(__a_sb, -2, 4)))) + (0.0281f * (__SBREF(__a_sb, -1, -4)))) + (0.0291f * (__SBREF(__a_sb, -1, -3)))) + (0.0301f * (__SBREF(__a_sb, -1, -2)))) + (0.0311f * (__SBREF(__a_sb, -1, -1)))) + (0.0321f * 
(__SBREF(__a_sb, -1, 0)))) + (0.0331f * (__SBREF(__a_sb, -1, 1)))) + (0.0341f * (__SBREF(__a_sb, -1, 2)))) + (0.0351f * (__SBREF(__a_sb, -1, 3)))) + (0.0361f * (__SBREF(__a_sb, -1, 4)))) + (0.0371f * (__SBREF(__a_sb, 0, -4)))) + (0.0381f * (__SBREF(__a_sb, 0, -3)))) + (0.0391f * (__SBREF(__a_sb, 0, -2)))) + (0.0401f * (__SBREF(__a_sb, 0, -1)))) + (0.0411f * (__SBREF(__a_sb, 0, 1)))) + (0.0421f * (__SBREF(__a_sb, 0, 2)))) + (0.0431f * (__SBREF(__a_sb, 0, 3)))) + (0.0441f * (__SBREF(__a_sb, 0, 4)))) + (0.0451f * (__SBREF(__a_sb, 1, -4)))) + (0.0461f * (__SBREF(__a_sb, 1, -3)))) + (0.0471f * (__SBREF(__a_sb, 1, -2)))) + (0.0481f * (__SBREF(__a_sb, 1, -1)))) + (0.0491f * (__SBREF(__a_sb, 1, 0)))) + (0.0501f * (__SBREF(__a_sb, 1, 1)))) + (0.0511f * (__SBREF(__a_sb, 1, 2)))) + (0.0521f * (__SBREF(__a_sb, 1, 3)))) + (0.0531f * (__SBREF(__a_sb, 1, 4)))) + (0.0541f * (__SBREF(__a_sb, 2, -4)))) + (0.0551f * (__SBREF(__a_sb, 2, -3)))) + (0.0561f * (__SBREF(__a_sb, 2, -2)))) + (0.0571f * (__SBREF(__a_sb, 2, -1)))) + (0.0581f * (__SBREF(__a_sb, 2, 0)))) + (0.0591f * (__SBREF(__a_sb, 2, 1)))) + (0.0601f * (__SBREF(__a_sb, 2, 2)))) + (0.0611f * (__SBREF(__a_sb, 2, 3)))) + (0.0621f * (__SBREF(__a_sb, 2, 4)))) + (0.0631f * (__SBREF(__a_sb, 3, -4)))) + (0.0641f * (__SBREF(__a_sb, 3, -3)))) + (0.0651f * (__SBREF(__a_sb, 3, -2)))) + (0.0661f * (__SBREF(__a_sb, 3, -1)))) + (0.0671f * (__SBREF(__a_sb, 3, 0)))) + (0.0681f * (__SBREF(__a_sb, 3, 1)))) + (0.0691f * (__SBREF(__a_sb, 3, 2)))) + (0.0701f * (__SBREF(__a_sb, 3, 3)))) + (0.0711f * (__SBREF(__a_sb, 3, 4)))) + (0.0721f * (__SBREF(__a_sb, 4, -4)))) + (0.0731f * (__SBREF(__a_sb, 4, -3)))) + (0.0741f * (__SBREF(__a_sb, 4, -2)))) + (0.0751f * (__SBREF(__a_sb, 4, -1)))) + (0.0761f * (__SBREF(__a_sb, 4, 0)))) + (0.0771f * (__SBREF(__a_sb, 4, 1)))) + (0.0781f * (__SBREF(__a_sb, 4, 2)))) + (0.0791f * (__SBREF(__a_sb, 4, 3)))) + (0.0801f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.256f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -4, -4)))) + (0.0022f * (__SBREF(__a_sb, -4, -3)))) + (0.0032f * (__SBREF(__a_sb, -4, -2)))) + (0.0042f * (__SBREF(__a_sb, -4, -1)))) + (0.0052f * (__SBREF(__a_sb, -4, 0)))) + (0.0062f * (__SBREF(__a_sb, -4, 1)))) + (0.0072f * (__SBREF(__a_sb, -4, 2)))) + (0.0082f * (__SBREF(__a_sb, -4, 3)))) + (0.0092f * (__SBREF(__a_sb, -4, 4)))) + (0.0102f * (__SBREF(__a_sb, -3, -4)))) + (0.0112f * (__SBREF(__a_sb, -3, -3)))) + (0.0122f * (__SBREF(__a_sb, -3, -2)))) + (0.0132f * (__SBREF(__a_sb, -3, -1)))) + (0.0142f * (__SBREF(__a_sb, -3, 0)))) + (0.0152f * (__SBREF(__a_sb, -3, 1)))) + (0.0162f * (__SBREF(__a_sb, -3, 2)))) + (0.0172f * (__SBREF(__a_sb, -3, 3)))) + (0.0182f * (__SBREF(__a_sb, -3, 4)))) + (0.0192f * (__SBREF(__a_sb, -2, -4)))) + (0.0202f * (__SBREF(__a_sb, -2, -3)))) + (0.0212f * (__SBREF(__a_sb, -2, -2)))) + (0.0222f * (__SBREF(__a_sb, -2, -1)))) + (0.0232f * (__SBREF(__a_sb, -2, 0)))) + (0.0242f * (__SBREF(__a_sb, -2, 1)))) + (0.0252f * (__SBREF(__a_sb, -2, 2)))) + (0.0262f * (__SBREF(__a_sb, -2, 3)))) + (0.0272f * (__SBREF(__a_sb, -2, 4)))) + (0.0282f * (__SBREF(__a_sb, -1, -4)))) + (0.0292f * (__SBREF(__a_sb, -1, -3)))) + (0.0302f * (__SBREF(__a_sb, -1, -2)))) + (0.0312f * (__SBREF(__a_sb, -1, -1)))) + (0.0322f * (__SBREF(__a_sb, -1, 0)))) + (0.0332f * (__SBREF(__a_sb, -1, 1)))) + (0.0342f * (__SBREF(__a_sb, -1, 2)))) + (0.0352f * (__SBREF(__a_sb, -1, 3)))) + (0.0362f * (__SBREF(__a_sb, -1, 4)))) + (0.0372f * (__SBREF(__a_sb, 0, -4)))) + (0.0382f * (__SBREF(__a_sb, 0, -3)))) + (0.0392f * (__SBREF(__a_sb, 0, -2)))) + (0.0402f * (__SBREF(__a_sb, 0, -1)))) + (0.0412f * (__SBREF(__a_sb, 0, 1)))) + (0.0422f * (__SBREF(__a_sb, 0, 2)))) + (0.0432f * (__SBREF(__a_sb, 0, 3)))) + (0.0442f * (__SBREF(__a_sb, 0, 4)))) + (0.0452f * (__SBREF(__a_sb, 1, -4)))) + (0.0462f * (__SBREF(__a_sb, 1, -3)))) + (0.0472f * (__SBREF(__a_sb, 1, -2)))) + (0.0482f * (__SBREF(__a_sb, 1, -1)))) + (0.0492f * (__SBREF(__a_sb, 1, 0)))) + (0.0502f * (__SBREF(__a_sb, 1, 1)))) + (0.0512f * (__SBREF(__a_sb, 1, 2)))) + (0.0522f * (__SBREF(__a_sb, 1, 3)))) + (0.0532f * (__SBREF(__a_sb, 1, 4)))) + (0.0542f * (__SBREF(__a_sb, 2, -4)))) + (0.0552f * (__SBREF(__a_sb, 2, -3)))) + (0.0562f * (__SBREF(__a_sb, 2, -2)))) + (0.0572f * (__SBREF(__a_sb, 2, -1)))) + (0.0582f * (__SBREF(__a_sb, 2, 0)))) + (0.0592f * (__SBREF(__a_sb, 2, 1)))) + (0.0602f * (__SBREF(__a_sb, 2, 2)))) + (0.0612f * (__SBREF(__a_sb, 2, 3)))) + (0.0622f * (__SBREF(__a_sb, 
2, 4)))) + (0.0632f * (__SBREF(__a_sb, 3, -4)))) + (0.0642f * (__SBREF(__a_sb, 3, -3)))) + (0.0652f * (__SBREF(__a_sb, 3, -2)))) + (0.0662f * (__SBREF(__a_sb, 3, -1)))) + (0.0672f * (__SBREF(__a_sb, 3, 0)))) + (0.0682f * (__SBREF(__a_sb, 3, 1)))) + (0.0692f * (__SBREF(__a_sb, 3, 2)))) + (0.0702f * (__SBREF(__a_sb, 3, 3)))) + (0.0712f * (__SBREF(__a_sb, 3, 4)))) + (0.0722f * (__SBREF(__a_sb, 4, -4)))) + (0.0732f * (__SBREF(__a_sb, 4, -3)))) + (0.0742f * (__SBREF(__a_sb, 4, -2)))) + (0.0752f * (__SBREF(__a_sb, 4, -1)))) + (0.0762f * (__SBREF(__a_sb, 4, 0)))) + (0.0772f * (__SBREF(__a_sb, 4, 1)))) + (0.0782f * (__SBREF(__a_sb, 4, 2)))) + (0.0792f * (__SBREF(__a_sb, 4, 3)))) + (0.0802f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.264f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -4, -4)))) + (0.0023f * (__SBREF(__a_sb, -4, -3)))) + (0.0033f * (__SBREF(__a_sb, -4, -2)))) + (0.0043f * (__SBREF(__a_sb, -4, -1)))) + (0.0053f * (__SBREF(__a_sb, -4, 0)))) + (0.0063f * (__SBREF(__a_sb, -4, 1)))) + (0.0073f * (__SBREF(__a_sb, -4, 2)))) + (0.0083f * (__SBREF(__a_sb, -4, 3)))) + (0.0093f * (__SBREF(__a_sb, -4, 4)))) + (0.0103f * (__SBREF(__a_sb, -3, -4)))) + (0.0113f * (__SBREF(__a_sb, -3, -3)))) + (0.0123f * (__SBREF(__a_sb, -3, -2)))) + (0.0133f * (__SBREF(__a_sb, -3, -1)))) + (0.0143f * (__SBREF(__a_sb, -3, 0)))) + (0.0153f * (__SBREF(__a_sb, -3, 1)))) + (0.0163f * (__SBREF(__a_sb, -3, 2)))) + (0.0173f * (__SBREF(__a_sb, -3, 3)))) + (0.0183f * (__SBREF(__a_sb, -3, 4)))) + (0.0193f * (__SBREF(__a_sb, -2, -4)))) + (0.0203f * (__SBREF(__a_sb, -2, -3)))) + (0.0213f * (__SBREF(__a_sb, -2, -2)))) + (0.0223f * (__SBREF(__a_sb, -2, -1)))) + (0.0233f * (__SBREF(__a_sb, -2, 0)))) + (0.0243f * (__SBREF(__a_sb, -2, 1)))) + (0.0253f * (__SBREF(__a_sb, -2, 2)))) + (0.0263f * (__SBREF(__a_sb, -2, 3)))) + (0.0273f * (__SBREF(__a_sb, -2, 4)))) + (0.0283f * (__SBREF(__a_sb, -1, -4)))) + (0.0293f * (__SBREF(__a_sb, -1, -3)))) + (0.0303f * (__SBREF(__a_sb, -1, -2)))) + (0.0313f * (__SBREF(__a_sb, -1, -1)))) 
+ (0.0323f * (__SBREF(__a_sb, -1, 0)))) + (0.0333f * (__SBREF(__a_sb, -1, 1)))) + (0.0343f * (__SBREF(__a_sb, -1, 2)))) + (0.0353f * (__SBREF(__a_sb, -1, 3)))) + (0.0363f * (__SBREF(__a_sb, -1, 4)))) + (0.0373f * (__SBREF(__a_sb, 0, -4)))) + (0.0383f * (__SBREF(__a_sb, 0, -3)))) + (0.0393f * (__SBREF(__a_sb, 0, -2)))) + (0.0403f * (__SBREF(__a_sb, 0, -1)))) + (0.0413f * (__SBREF(__a_sb, 0, 1)))) + (0.0423f * (__SBREF(__a_sb, 0, 2)))) + (0.0433f * (__SBREF(__a_sb, 0, 3)))) + (0.0443f * (__SBREF(__a_sb, 0, 4)))) + (0.0453f * (__SBREF(__a_sb, 1, -4)))) + (0.0463f * (__SBREF(__a_sb, 1, -3)))) + (0.0473f * (__SBREF(__a_sb, 1, -2)))) + (0.0483f * (__SBREF(__a_sb, 1, -1)))) + (0.0493f * (__SBREF(__a_sb, 1, 0)))) + (0.0503f * (__SBREF(__a_sb, 1, 1)))) + (0.0513f * (__SBREF(__a_sb, 1, 2)))) + (0.0523f * (__SBREF(__a_sb, 1, 3)))) + (0.0533f * (__SBREF(__a_sb, 1, 4)))) + (0.0543f * (__SBREF(__a_sb, 2, -4)))) + (0.0553f * (__SBREF(__a_sb, 2, -3)))) + (0.0563f * (__SBREF(__a_sb, 2, -2)))) + (0.0573f * (__SBREF(__a_sb, 2, -1)))) + (0.0583f * (__SBREF(__a_sb, 2, 0)))) + (0.0593f * (__SBREF(__a_sb, 2, 1)))) + (0.0603f * (__SBREF(__a_sb, 2, 2)))) + (0.0613f * (__SBREF(__a_sb, 2, 3)))) + (0.0623f * (__SBREF(__a_sb, 2, 4)))) + (0.0633f * (__SBREF(__a_sb, 3, -4)))) + (0.0643f * (__SBREF(__a_sb, 3, -3)))) + (0.0653f * (__SBREF(__a_sb, 3, -2)))) + (0.0663f * (__SBREF(__a_sb, 3, -1)))) + (0.0673f * (__SBREF(__a_sb, 3, 0)))) + (0.0683f * (__SBREF(__a_sb, 3, 1)))) + (0.0693f * (__SBREF(__a_sb, 3, 2)))) + (0.0703f * (__SBREF(__a_sb, 3, 3)))) + (0.0713f * (__SBREF(__a_sb, 3, 4)))) + (0.0723f * (__SBREF(__a_sb, 4, -4)))) + (0.0733f * (__SBREF(__a_sb, 4, -3)))) + (0.0743f * (__SBREF(__a_sb, 4, -2)))) + (0.0753f * (__SBREF(__a_sb, 4, -1)))) + (0.0763f * (__SBREF(__a_sb, 4, 0)))) + (0.0773f * (__SBREF(__a_sb, 4, 1)))) + (0.0783f * (__SBREF(__a_sb, 4, 2)))) + (0.0793f * (__SBREF(__a_sb, 4, 3)))) + (0.0803f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.272f) * (__REGREF(__a, 0, 0)))) + (0.0014f * (__SBREF(__a_sb, -4, -4)))) + (0.0024f * (__SBREF(__a_sb, -4, -3)))) + (0.0034f * (__SBREF(__a_sb, -4, -2)))) + (0.0044f * (__SBREF(__a_sb, -4, -1)))) + (0.0054f * (__SBREF(__a_sb, -4, 0)))) + (0.0064f * (__SBREF(__a_sb, -4, 1)))) + (0.0074f * (__SBREF(__a_sb, -4, 2)))) + (0.0084f * (__SBREF(__a_sb, -4, 3)))) + (0.0094f * (__SBREF(__a_sb, -4, 4)))) + (0.0104f * (__SBREF(__a_sb, -3, -4)))) + (0.0114f * (__SBREF(__a_sb, -3, -3)))) + (0.0124f * (__SBREF(__a_sb, -3, -2)))) + (0.0134f * (__SBREF(__a_sb, -3, -1)))) + (0.0144f * (__SBREF(__a_sb, -3, 0)))) + (0.0154f * (__SBREF(__a_sb, -3, 1)))) + (0.0164f * (__SBREF(__a_sb, -3, 2)))) + (0.0174f * (__SBREF(__a_sb, -3, 3)))) + (0.0184f * (__SBREF(__a_sb, -3, 4)))) + (0.0194f * (__SBREF(__a_sb, -2, -4)))) + (0.0204f * (__SBREF(__a_sb, -2, -3)))) + (0.0214f * (__SBREF(__a_sb, -2, -2)))) + (0.0224f * (__SBREF(__a_sb, -2, -1)))) + (0.0234f * (__SBREF(__a_sb, -2, 0)))) + (0.0244f * (__SBREF(__a_sb, -2, 1)))) + (0.0254f * (__SBREF(__a_sb, -2, 2)))) + (0.0264f * (__SBREF(__a_sb, -2, 3)))) + (0.0274f * (__SBREF(__a_sb, -2, 4)))) + (0.0284f * (__SBREF(__a_sb, -1, -4)))) + (0.0294f * (__SBREF(__a_sb, -1, -3)))) + (0.0304f * (__SBREF(__a_sb, -1, -2)))) + (0.0314f * (__SBREF(__a_sb, -1, -1)))) + (0.0324f * (__SBREF(__a_sb, -1, 0)))) + (0.0334f * (__SBREF(__a_sb, -1, 1)))) + (0.0344f * (__SBREF(__a_sb, -1, 2)))) + (0.0354f * (__SBREF(__a_sb, -1, 3)))) + (0.0364f * (__SBREF(__a_sb, -1, 4)))) + (0.0374f * (__SBREF(__a_sb, 0, -4)))) + (0.0384f * (__SBREF(__a_sb, 0, -3)))) + (0.0394f * (__SBREF(__a_sb, 0, -2)))) + (0.0404f * (__SBREF(__a_sb, 0, -1)))) + (0.0414f * (__SBREF(__a_sb, 0, 1)))) + (0.0424f * (__SBREF(__a_sb, 0, 2)))) + (0.0434f * (__SBREF(__a_sb, 0, 3)))) + (0.0444f * (__SBREF(__a_sb, 0, 4)))) + (0.0454f * (__SBREF(__a_sb, 1, -4)))) + (0.0464f * (__SBREF(__a_sb, 1, -3)))) + (0.0474f * (__SBREF(__a_sb, 1, -2)))) + (0.0484f * (__SBREF(__a_sb, 1, -1)))) + (0.0494f * (__SBREF(__a_sb, 1, 0)))) + (0.0504f * (__SBREF(__a_sb, 1, 1)))) + (0.0514f * (__SBREF(__a_sb, 1, 2)))) + (0.0524f * (__SBREF(__a_sb, 1, 3)))) + (0.0534f * (__SBREF(__a_sb, 1, 4)))) + (0.0544f * (__SBREF(__a_sb, 2, -4)))) + (0.0554f * (__SBREF(__a_sb, 2, -3)))) + (0.0564f * (__SBREF(__a_sb, 2, -2)))) + (0.0574f * (__SBREF(__a_sb, 2, -1)))) + (0.0584f * (__SBREF(__a_sb, 2, 0)))) + (0.0594f * (__SBREF(__a_sb, 2, 1)))) + (0.0604f * (__SBREF(__a_sb, 2, 2)))) + (0.0614f * (__SBREF(__a_sb, 2, 3)))) + (0.0624f * (__SBREF(__a_sb, 2, 4)))) + (0.0634f * (__SBREF(__a_sb, 3, -4)))) + (0.0644f * (__SBREF(__a_sb, 3, -3)))) + (0.0654f * (__SBREF(__a_sb, 3, -2)))) + (0.0664f * (__SBREF(__a_sb, 3, 
-1)))) + (0.0674f * (__SBREF(__a_sb, 3, 0)))) + (0.0684f * (__SBREF(__a_sb, 3, 1)))) + (0.0694f * (__SBREF(__a_sb, 3, 2)))) + (0.0704f * (__SBREF(__a_sb, 3, 3)))) + (0.0714f * (__SBREF(__a_sb, 3, 4)))) + (0.0724f * (__SBREF(__a_sb, 4, -4)))) + (0.0734f * (__SBREF(__a_sb, 4, -3)))) + (0.0744f * (__SBREF(__a_sb, 4, -2)))) + (0.0754f * (__SBREF(__a_sb, 4, -1)))) + (0.0764f * (__SBREF(__a_sb, 4, 0)))) + (0.0774f * (__SBREF(__a_sb, 4, 1)))) + (0.0784f * (__SBREF(__a_sb, 4, 2)))) + (0.0794f * (__SBREF(__a_sb, 4, 3)))) + (0.0804f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.280f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -4, -4)))) + (0.0025f * (__SBREF(__a_sb, -4, -3)))) + (0.0035f * (__SBREF(__a_sb, -4, -2)))) + (0.0045f * (__SBREF(__a_sb, -4, -1)))) + (0.0055f * (__SBREF(__a_sb, -4, 0)))) + (0.0065f * (__SBREF(__a_sb, -4, 1)))) + (0.0075f * (__SBREF(__a_sb, -4, 2)))) + (0.0085f * (__SBREF(__a_sb, -4, 3)))) + (0.0095f * (__SBREF(__a_sb, -4, 4)))) + (0.0105f * (__SBREF(__a_sb, -3, -4)))) + (0.0115f * (__SBREF(__a_sb, -3, -3)))) + (0.0125f * (__SBREF(__a_sb, -3, -2)))) + (0.0135f * (__SBREF(__a_sb, -3, -1)))) + (0.0145f * (__SBREF(__a_sb, -3, 0)))) + (0.0155f * (__SBREF(__a_sb, -3, 1)))) + (0.0165f * (__SBREF(__a_sb, -3, 2)))) + (0.0175f * (__SBREF(__a_sb, -3, 3)))) + (0.0185f * (__SBREF(__a_sb, -3, 4)))) + (0.0195f * (__SBREF(__a_sb, -2, -4)))) + (0.0205f * (__SBREF(__a_sb, -2, -3)))) + (0.0215f * (__SBREF(__a_sb, -2, -2)))) + (0.0225f * (__SBREF(__a_sb, -2, -1)))) + (0.0235f * (__SBREF(__a_sb, -2, 0)))) + (0.0245f * (__SBREF(__a_sb, -2, 1)))) + (0.0255f * (__SBREF(__a_sb, -2, 2)))) + (0.0265f * (__SBREF(__a_sb, -2, 3)))) + (0.0275f * (__SBREF(__a_sb, -2, 4)))) + (0.0285f * (__SBREF(__a_sb, -1, -4)))) + (0.0295f * (__SBREF(__a_sb, -1, -3)))) + (0.0305f * (__SBREF(__a_sb, -1, -2)))) + (0.0315f * (__SBREF(__a_sb, -1, -1)))) + (0.0325f * (__SBREF(__a_sb, -1, 0)))) + (0.0335f * (__SBREF(__a_sb, -1, 1)))) + (0.0345f * (__SBREF(__a_sb, -1, 2)))) + (0.0355f * (__SBREF(__a_sb, -1, 3)))) + (0.0365f * (__SBREF(__a_sb, -1, 4)))) + (0.0375f * (__SBREF(__a_sb, 0, -4)))) + (0.0385f * (__SBREF(__a_sb, 0, -3)))) + (0.0395f * (__SBREF(__a_sb, 0, -2)))) + (0.0405f * (__SBREF(__a_sb, 0, -1)))) + (0.0415f * (__SBREF(__a_sb, 0, 1)))) + (0.0425f * (__SBREF(__a_sb, 0, 2)))) + (0.0435f * (__SBREF(__a_sb, 0, 3)))) + 
(0.0445f * (__SBREF(__a_sb, 0, 4)))) + (0.0455f * (__SBREF(__a_sb, 1, -4)))) + (0.0465f * (__SBREF(__a_sb, 1, -3)))) + (0.0475f * (__SBREF(__a_sb, 1, -2)))) + (0.0485f * (__SBREF(__a_sb, 1, -1)))) + (0.0495f * (__SBREF(__a_sb, 1, 0)))) + (0.0505f * (__SBREF(__a_sb, 1, 1)))) + (0.0515f * (__SBREF(__a_sb, 1, 2)))) + (0.0525f * (__SBREF(__a_sb, 1, 3)))) + (0.0535f * (__SBREF(__a_sb, 1, 4)))) + (0.0545f * (__SBREF(__a_sb, 2, -4)))) + (0.0555f * (__SBREF(__a_sb, 2, -3)))) + (0.0565f * (__SBREF(__a_sb, 2, -2)))) + (0.0575f * (__SBREF(__a_sb, 2, -1)))) + (0.0585f * (__SBREF(__a_sb, 2, 0)))) + (0.0595f * (__SBREF(__a_sb, 2, 1)))) + (0.0605f * (__SBREF(__a_sb, 2, 2)))) + (0.0615f * (__SBREF(__a_sb, 2, 3)))) + (0.0625f * (__SBREF(__a_sb, 2, 4)))) + (0.0635f * (__SBREF(__a_sb, 3, -4)))) + (0.0645f * (__SBREF(__a_sb, 3, -3)))) + (0.0655f * (__SBREF(__a_sb, 3, -2)))) + (0.0665f * (__SBREF(__a_sb, 3, -1)))) + (0.0675f * (__SBREF(__a_sb, 3, 0)))) + (0.0685f * (__SBREF(__a_sb, 3, 1)))) + (0.0695f * (__SBREF(__a_sb, 3, 2)))) + (0.0705f * (__SBREF(__a_sb, 3, 3)))) + (0.0715f * (__SBREF(__a_sb, 3, 4)))) + (0.0725f * (__SBREF(__a_sb, 4, -4)))) + (0.0735f * (__SBREF(__a_sb, 4, -3)))) + (0.0745f * (__SBREF(__a_sb, 4, -2)))) + (0.0755f * (__SBREF(__a_sb, 4, -1)))) + (0.0765f * (__SBREF(__a_sb, 4, 0)))) + (0.0775f * (__SBREF(__a_sb, 4, 1)))) + (0.0785f * (__SBREF(__a_sb, 4, 2)))) + (0.0795f * (__SBREF(__a_sb, 4, 3)))) + (0.0805f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.288f) * (__REGREF(__a, 0, 0)))) + (0.0016f * (__SBREF(__a_sb, -4, -4)))) + (0.0026f * (__SBREF(__a_sb, -4, -3)))) + (0.0036f * (__SBREF(__a_sb, -4, -2)))) + (0.0046f * (__SBREF(__a_sb, -4, -1)))) + (0.0056f * (__SBREF(__a_sb, -4, 0)))) + (0.0066f * (__SBREF(__a_sb, -4, 1)))) + (0.0076f * (__SBREF(__a_sb, -4, 2)))) + (0.0086f * (__SBREF(__a_sb, -4, 3)))) + (0.0096f * (__SBREF(__a_sb, -4, 4)))) + (0.0106f * (__SBREF(__a_sb, -3, -4)))) + (0.0116f * (__SBREF(__a_sb, -3, -3)))) + (0.0126f * (__SBREF(__a_sb, -3, -2)))) + (0.0136f * (__SBREF(__a_sb, -3, -1)))) + (0.0146f * (__SBREF(__a_sb, -3, 0)))) + (0.0156f * (__SBREF(__a_sb, -3, 1)))) + (0.0166f * (__SBREF(__a_sb, -3, 2)))) + (0.0176f * (__SBREF(__a_sb, -3, 3)))) + (0.0186f * (__SBREF(__a_sb, -3, 4)))) + (0.0196f * (__SBREF(__a_sb, -2, -4)))) + (0.0206f * (__SBREF(__a_sb, -2, -3)))) + (0.0216f * (__SBREF(__a_sb, -2, -2)))) + (0.0226f * (__SBREF(__a_sb, -2, -1)))) + (0.0236f * (__SBREF(__a_sb, -2, 0)))) + (0.0246f * (__SBREF(__a_sb, -2, 1)))) + (0.0256f * (__SBREF(__a_sb, -2, 2)))) + (0.0266f * (__SBREF(__a_sb, -2, 3)))) + (0.0276f * (__SBREF(__a_sb, -2, 4)))) + (0.0286f * (__SBREF(__a_sb, -1, -4)))) + (0.0296f * (__SBREF(__a_sb, -1, -3)))) + (0.0306f * (__SBREF(__a_sb, -1, -2)))) + (0.0316f * (__SBREF(__a_sb, -1, -1)))) + (0.0326f * (__SBREF(__a_sb, -1, 0)))) + (0.0336f * (__SBREF(__a_sb, -1, 1)))) + (0.0346f * (__SBREF(__a_sb, -1, 2)))) + (0.0356f * (__SBREF(__a_sb, -1, 3)))) + (0.0366f * (__SBREF(__a_sb, -1, 4)))) + (0.0376f * (__SBREF(__a_sb, 0, -4)))) + (0.0386f * (__SBREF(__a_sb, 0, -3)))) + (0.0396f * (__SBREF(__a_sb, 0, -2)))) + (0.0406f * (__SBREF(__a_sb, 0, -1)))) + (0.0416f * (__SBREF(__a_sb, 0, 1)))) + (0.0426f * (__SBREF(__a_sb, 0, 2)))) + (0.0436f * (__SBREF(__a_sb, 0, 3)))) + (0.0446f * (__SBREF(__a_sb, 0, 4)))) + (0.0456f * (__SBREF(__a_sb, 1, -4)))) + (0.0466f * (__SBREF(__a_sb, 1, -3)))) + (0.0476f * (__SBREF(__a_sb, 1, -2)))) + (0.0486f * (__SBREF(__a_sb, 1, -1)))) + (0.0496f * (__SBREF(__a_sb, 1, 0)))) + (0.0506f * (__SBREF(__a_sb, 1, 1)))) + (0.0516f * (__SBREF(__a_sb, 1, 2)))) + (0.0526f * (__SBREF(__a_sb, 1, 3)))) + (0.0536f * (__SBREF(__a_sb, 1, 4)))) + (0.0546f * (__SBREF(__a_sb, 2, -4)))) + (0.0556f * (__SBREF(__a_sb, 2, -3)))) + (0.0566f * (__SBREF(__a_sb, 2, -2)))) + (0.0576f * (__SBREF(__a_sb, 2, -1)))) + (0.0586f * (__SBREF(__a_sb, 2, 0)))) + (0.0596f * (__SBREF(__a_sb, 2, 1)))) + (0.0606f * (__SBREF(__a_sb, 2, 2)))) + (0.0616f * (__SBREF(__a_sb, 2, 3)))) + (0.0626f * (__SBREF(__a_sb, 2, 4)))) + (0.0636f * (__SBREF(__a_sb, 3, -4)))) + (0.0646f * (__SBREF(__a_sb, 3, -3)))) + (0.0656f * (__SBREF(__a_sb, 3, -2)))) + (0.0666f * (__SBREF(__a_sb, 3, -1)))) + (0.0676f * (__SBREF(__a_sb, 3, 0)))) + (0.0686f * (__SBREF(__a_sb, 3, 1)))) + (0.0696f * (__SBREF(__a_sb, 3, 2)))) + (0.0706f * (__SBREF(__a_sb, 3, 
3)))) + (0.0716f * (__SBREF(__a_sb, 3, 4)))) + (0.0726f * (__SBREF(__a_sb, 4, -4)))) + (0.0736f * (__SBREF(__a_sb, 4, -3)))) + (0.0746f * (__SBREF(__a_sb, 4, -2)))) + (0.0756f * (__SBREF(__a_sb, 4, -1)))) + (0.0766f * (__SBREF(__a_sb, 4, 0)))) + (0.0776f * (__SBREF(__a_sb, 4, 1)))) + (0.0786f * (__SBREF(__a_sb, 4, 2)))) + (0.0796f * (__SBREF(__a_sb, 4, 3)))) + (0.0806f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.296f) * (__REGREF(__a, 0, 0)))) + (0.0017f * (__SBREF(__a_sb, -4, -4)))) + (0.0027f * (__SBREF(__a_sb, -4, -3)))) + (0.0037f * (__SBREF(__a_sb, -4, -2)))) + (0.0047f * (__SBREF(__a_sb, -4, -1)))) + (0.0057f * (__SBREF(__a_sb, -4, 0)))) + (0.0067f * (__SBREF(__a_sb, -4, 1)))) + (0.0077f * (__SBREF(__a_sb, -4, 2)))) + (0.0087f * (__SBREF(__a_sb, -4, 3)))) + (0.0097f * (__SBREF(__a_sb, -4, 4)))) + (0.0107f * (__SBREF(__a_sb, -3, -4)))) + (0.0117f * (__SBREF(__a_sb, -3, -3)))) + (0.0127f * (__SBREF(__a_sb, -3, -2)))) + (0.0137f * (__SBREF(__a_sb, -3, -1)))) + (0.0147f * (__SBREF(__a_sb, -3, 0)))) + (0.0157f * (__SBREF(__a_sb, -3, 1)))) + (0.0167f * (__SBREF(__a_sb, -3, 2)))) + (0.0177f * (__SBREF(__a_sb, -3, 3)))) + (0.0187f * (__SBREF(__a_sb, -3, 4)))) + (0.0197f * (__SBREF(__a_sb, -2, -4)))) + (0.0207f * (__SBREF(__a_sb, -2, -3)))) + (0.0217f * (__SBREF(__a_sb, -2, -2)))) + (0.0227f * (__SBREF(__a_sb, -2, -1)))) + (0.0237f * (__SBREF(__a_sb, -2, 0)))) + (0.0247f * (__SBREF(__a_sb, -2, 1)))) + (0.0257f * (__SBREF(__a_sb, -2, 2)))) + (0.0267f * (__SBREF(__a_sb, -2, 3)))) + (0.0277f * (__SBREF(__a_sb, -2, 4)))) + (0.0287f * (__SBREF(__a_sb, -1, -4)))) + (0.0297f * (__SBREF(__a_sb, -1, -3)))) + (0.0307f * (__SBREF(__a_sb, -1, -2)))) + (0.0317f * (__SBREF(__a_sb, -1, -1)))) + (0.0327f * (__SBREF(__a_sb, -1, 0)))) + (0.0337f * (__SBREF(__a_sb, -1, 1)))) + (0.0347f * (__SBREF(__a_sb, -1, 2)))) + (0.0357f * (__SBREF(__a_sb, -1, 3)))) + (0.0367f * (__SBREF(__a_sb, -1, 4)))) + (0.0377f * (__SBREF(__a_sb, 0, -4)))) + (0.0387f * (__SBREF(__a_sb, 0, -3)))) + (0.0397f * (__SBREF(__a_sb, 0, -2)))) + (0.0407f * (__SBREF(__a_sb, 0, -1)))) + (0.0417f * (__SBREF(__a_sb, 0, 1)))) + (0.0427f * (__SBREF(__a_sb, 0, 2)))) + (0.0437f * (__SBREF(__a_sb, 0, 3)))) + (0.0447f * (__SBREF(__a_sb, 0, 4)))) + (0.0457f * (__SBREF(__a_sb, 1, -4)))) + (0.0467f * (__SBREF(__a_sb, 1, -3)))) + (0.0477f * (__SBREF(__a_sb, 1, -2)))) + (0.0487f * (__SBREF(__a_sb, 1, -1)))) + (0.0497f * (__SBREF(__a_sb, 1, 0)))) + (0.0507f * (__SBREF(__a_sb, 1, 1)))) + (0.0517f * (__SBREF(__a_sb, 1, 2)))) + (0.0527f * (__SBREF(__a_sb, 1, 3)))) + (0.0537f * (__SBREF(__a_sb, 1, 4)))) + (0.0547f * (__SBREF(__a_sb, 2, -4)))) + (0.0557f * (__SBREF(__a_sb, 2, -3)))) + (0.0567f * 
(__SBREF(__a_sb, 2, -2)))) + (0.0577f * (__SBREF(__a_sb, 2, -1)))) + (0.0587f * (__SBREF(__a_sb, 2, 0)))) + (0.0597f * (__SBREF(__a_sb, 2, 1)))) + (0.0607f * (__SBREF(__a_sb, 2, 2)))) + (0.0617f * (__SBREF(__a_sb, 2, 3)))) + (0.0627f * (__SBREF(__a_sb, 2, 4)))) + (0.0637f * (__SBREF(__a_sb, 3, -4)))) + (0.0647f * (__SBREF(__a_sb, 3, -3)))) + (0.0657f * (__SBREF(__a_sb, 3, -2)))) + (0.0667f * (__SBREF(__a_sb, 3, -1)))) + (0.0677f * (__SBREF(__a_sb, 3, 0)))) + (0.0687f * (__SBREF(__a_sb, 3, 1)))) + (0.0697f * (__SBREF(__a_sb, 3, 2)))) + (0.0707f * (__SBREF(__a_sb, 3, 3)))) + (0.0717f * (__SBREF(__a_sb, 3, 4)))) + (0.0727f * (__SBREF(__a_sb, 4, -4)))) + (0.0737f * (__SBREF(__a_sb, 4, -3)))) + (0.0747f * (__SBREF(__a_sb, 4, -2)))) + (0.0757f * (__SBREF(__a_sb, 4, -1)))) + (0.0767f * (__SBREF(__a_sb, 4, 0)))) + (0.0777f * (__SBREF(__a_sb, 4, 1)))) + (0.0787f * (__SBREF(__a_sb, 4, 2)))) + (0.0797f * (__SBREF(__a_sb, 4, 3)))) + (0.0807f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.304f) * (__REGREF(__a, 0, 0)))) + (0.0018f * (__SBREF(__a_sb, -4, -4)))) + (0.0028f * (__SBREF(__a_sb, -4, -3)))) + (0.0038f * (__SBREF(__a_sb, -4, -2)))) + (0.0048f * (__SBREF(__a_sb, -4, -1)))) + (0.0058f * (__SBREF(__a_sb, -4, 0)))) + (0.0068f * (__SBREF(__a_sb, -4, 1)))) + (0.0078f * (__SBREF(__a_sb, -4, 2)))) + (0.0088f * (__SBREF(__a_sb, -4, 3)))) + (0.0098f * (__SBREF(__a_sb, -4, 4)))) + (0.0108f * (__SBREF(__a_sb, -3, -4)))) + (0.0118f * (__SBREF(__a_sb, -3, -3)))) + (0.0128f * (__SBREF(__a_sb, -3, -2)))) + (0.0138f * (__SBREF(__a_sb, -3, -1)))) + (0.0148f * (__SBREF(__a_sb, -3, 0)))) + (0.0158f * (__SBREF(__a_sb, -3, 1)))) + (0.0168f * (__SBREF(__a_sb, -3, 2)))) + (0.0178f * (__SBREF(__a_sb, -3, 3)))) + (0.0188f * (__SBREF(__a_sb, -3, 4)))) + (0.0198f * (__SBREF(__a_sb, -2, -4)))) + (0.0208f * (__SBREF(__a_sb, -2, -3)))) + (0.0218f * (__SBREF(__a_sb, -2, -2)))) + (0.0228f * (__SBREF(__a_sb, -2, -1)))) + (0.0238f * (__SBREF(__a_sb, -2, 0)))) + (0.0248f * (__SBREF(__a_sb, -2, 1)))) + (0.0258f * (__SBREF(__a_sb, -2, 2)))) + (0.0268f * (__SBREF(__a_sb, -2, 3)))) + (0.0278f * (__SBREF(__a_sb, -2, 4)))) + (0.0288f * (__SBREF(__a_sb, -1, -4)))) + (0.0298f * (__SBREF(__a_sb, -1, -3)))) + (0.0308f * (__SBREF(__a_sb, -1, -2)))) + (0.0318f * (__SBREF(__a_sb, -1, -1)))) + (0.0328f * (__SBREF(__a_sb, -1, 0)))) + (0.0338f * (__SBREF(__a_sb, -1, 1)))) + (0.0348f * (__SBREF(__a_sb, -1, 2)))) + (0.0358f * (__SBREF(__a_sb, -1, 3)))) + (0.0368f * (__SBREF(__a_sb, -1, 4)))) + (0.0378f * (__SBREF(__a_sb, 0, -4)))) + (0.0388f * (__SBREF(__a_sb, 0, -3)))) + (0.0398f * (__SBREF(__a_sb, 0, -2)))) + (0.0408f * (__SBREF(__a_sb, 0, -1)))) + (0.0418f * (__SBREF(__a_sb, 0, 1)))) + (0.0428f * (__SBREF(__a_sb, 0, 2)))) + (0.0438f * (__SBREF(__a_sb, 0, 3)))) + (0.0448f * (__SBREF(__a_sb, 0, 4)))) + (0.0458f * (__SBREF(__a_sb, 1, -4)))) + 
(0.0468f * (__SBREF(__a_sb, 1, -3)))) + (0.0478f * (__SBREF(__a_sb, 1, -2)))) + (0.0488f * (__SBREF(__a_sb, 1, -1)))) + (0.0498f * (__SBREF(__a_sb, 1, 0)))) + (0.0508f * (__SBREF(__a_sb, 1, 1)))) + (0.0518f * (__SBREF(__a_sb, 1, 2)))) + (0.0528f * (__SBREF(__a_sb, 1, 3)))) + (0.0538f * (__SBREF(__a_sb, 1, 4)))) + (0.0548f * (__SBREF(__a_sb, 2, -4)))) + (0.0558f * (__SBREF(__a_sb, 2, -3)))) + (0.0568f * (__SBREF(__a_sb, 2, -2)))) + (0.0578f * (__SBREF(__a_sb, 2, -1)))) + (0.0588f * (__SBREF(__a_sb, 2, 0)))) + (0.0598f * (__SBREF(__a_sb, 2, 1)))) + (0.0608f * (__SBREF(__a_sb, 2, 2)))) + (0.0618f * (__SBREF(__a_sb, 2, 3)))) + (0.0628f * (__SBREF(__a_sb, 2, 4)))) + (0.0638f * (__SBREF(__a_sb, 3, -4)))) + (0.0648f * (__SBREF(__a_sb, 3, -3)))) + (0.0658f * (__SBREF(__a_sb, 3, -2)))) + (0.0668f * (__SBREF(__a_sb, 3, -1)))) + (0.0678f * (__SBREF(__a_sb, 3, 0)))) + (0.0688f * (__SBREF(__a_sb, 3, 1)))) + (0.0698f * (__SBREF(__a_sb, 3, 2)))) + (0.0708f * (__SBREF(__a_sb, 3, 3)))) + (0.0718f * (__SBREF(__a_sb, 3, 4)))) + (0.0728f * (__SBREF(__a_sb, 4, -4)))) + (0.0738f * (__SBREF(__a_sb, 4, -3)))) + (0.0748f * (__SBREF(__a_sb, 4, -2)))) + (0.0758f * (__SBREF(__a_sb, 4, -1)))) + (0.0768f * (__SBREF(__a_sb, 4, 0)))) + (0.0778f * (__SBREF(__a_sb, 4, 1)))) + (0.0788f * (__SBREF(__a_sb, 4, 2)))) + (0.0798f * (__SBREF(__a_sb, 4, 3)))) + (0.0808f * (__SBREF(__a_sb, 4, 4)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, 
__reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(4, __reg_1_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(4, __reg_1_4); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); 
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, 
__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); } else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); 
__LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); } else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); __LOAD(__reg_0, __h + 10); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0); __STORE(__h + 6, __reg_1_6); } else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); 
__STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); __LOAD(__reg_0, __h + 10); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h + 6, __reg_1_6); __LOAD(__reg_0, __h + 11); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0); __STORE(__h + 7, __reg_1_7); } } else { for (__h = 9; __h <= __side1LenOl - 9;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; 
__DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; } }
9c7bd1dd5d746d39416cf748a672088086a71fbd.cu
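// Descriptive header (added annotation; details inferred from the constants and
// naming below, e.g. "box3d4r-32x32-1-128" and the AN5D_TYPE typedefs, which
// suggest an AN5D-style stencil code generator):
// kernel0_1 advances one fused time step (__side0Len = 1) of a radius-4
// (9x9x9-point) 3D box stencil over the time-double-buffered array A: the
// (c0 % 2) half of A is read and the ((c0 + 1) % 2) half is written via __DEST.
// Each thread block streams 128 planes along c1 (__side1Len = 128) across a
// 24x24 interior tile in the c2/c3 plane (32x32 including the 4-wide halos).
// Each loaded c1-plane is staged in the double-buffered shared array
// __a_sb_double (swapped by __DB_SWITCH) and accumulated into 9 in-flight
// output registers via __CALCEXPR_0..__CALCEXPR_8, one per c1 offset within
// the stencil radius; __STORE retires a finished register per iteration.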
#include "box3d4r-32x32-1-128_kernel.hu" __device__ double __sbref_wrap(double *sb, size_t index) { return sb[index]; } __global__ void kernel0_1(double *A, int dimsize, int timestep, int c0) { #ifndef AN5D_TYPE #define AN5D_TYPE unsigned #endif const AN5D_TYPE __c0Len = (timestep - 0); const AN5D_TYPE __c0Pad = (0); #define __c0 c0 const AN5D_TYPE __c1Len = (dimsize - 4 - 4); const AN5D_TYPE __c1Pad = (4); #define __c1 c1 const AN5D_TYPE __c2Len = (dimsize - 4 - 4); const AN5D_TYPE __c2Pad = (4); #define __c2 c2 const AN5D_TYPE __c3Len = (dimsize - 4 - 4); const AN5D_TYPE __c3Pad = (4); #define __c3 c3 const AN5D_TYPE __halo1 = 4; const AN5D_TYPE __halo2 = 4; const AN5D_TYPE __halo3 = 4; const AN5D_TYPE __side0Len = 1; const AN5D_TYPE __side1Len = 128; const AN5D_TYPE __side2Len = 24; const AN5D_TYPE __side3Len = 24; const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len); const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len); const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len); const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1); const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2); const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3); const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl; const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len; const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len; const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len; const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x; const AN5D_TYPE __local_c2 = __tid / __side3LenOl; const AN5D_TYPE __local_c3 = __tid % __side3LenOl; const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num; const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2; const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3; double __reg_0; double __reg_1_0; double __reg_1_1; double __reg_1_2; double __reg_1_3; double __reg_1_4; double __reg_1_5; double __reg_1_6; double __reg_1_7; double __reg_1_8; __shared__ double __a_sb_double[__blockSize * 2]; double *__a_sb = __a_sb_double; const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3; const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len; const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1); const AN5D_TYPE __storeValid = __writeValid1; AN5D_TYPE __c1; AN5D_TYPE __h; const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id; #define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0) #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = 
((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.240f) * (__REGREF(__a, 0, 0))) + (0.0010f * (__SBREF(__a_sb, -4, -4)))) + (0.0020f * (__SBREF(__a_sb, -4, -3)))) + (0.0030f * (__SBREF(__a_sb, -4, -2)))) + (0.0040f * (__SBREF(__a_sb, -4, -1)))) + (0.0050f * (__SBREF(__a_sb, -4, 0)))) + (0.0060f * (__SBREF(__a_sb, -4, 1)))) + (0.0070f * (__SBREF(__a_sb, -4, 2)))) + (0.0080f * (__SBREF(__a_sb, -4, 3)))) + (0.0090f * (__SBREF(__a_sb, -4, 4)))) + (0.0100f * (__SBREF(__a_sb, -3, -4)))) + (0.0110f * (__SBREF(__a_sb, -3, -3)))) + (0.0120f * (__SBREF(__a_sb, -3, -2)))) + (0.0130f * (__SBREF(__a_sb, -3, -1)))) + (0.0140f * (__SBREF(__a_sb, -3, 0)))) + (0.0150f * (__SBREF(__a_sb, -3, 1)))) + (0.0160f * (__SBREF(__a_sb, -3, 2)))) + (0.0170f * (__SBREF(__a_sb, -3, 3)))) + (0.0180f * (__SBREF(__a_sb, -3, 4)))) + (0.0190f * (__SBREF(__a_sb, -2, -4)))) + (0.0200f * (__SBREF(__a_sb, -2, -3)))) + (0.0210f * (__SBREF(__a_sb, -2, -2)))) + (0.0220f * (__SBREF(__a_sb, -2, -1)))) + (0.0230f * (__SBREF(__a_sb, -2, 0)))) + (0.0240f * (__SBREF(__a_sb, -2, 1)))) + (0.0250f * (__SBREF(__a_sb, -2, 2)))) + (0.0260f * (__SBREF(__a_sb, -2, 3)))) + (0.0270f * (__SBREF(__a_sb, -2, 4)))) + (0.0280f * (__SBREF(__a_sb, -1, -4)))) + (0.0290f * (__SBREF(__a_sb, -1, -3)))) + (0.0300f * (__SBREF(__a_sb, -1, -2)))) + (0.0310f * (__SBREF(__a_sb, -1, -1)))) + (0.0320f * (__SBREF(__a_sb, -1, 0)))) + (0.0330f * (__SBREF(__a_sb, -1, 1)))) + (0.0340f * (__SBREF(__a_sb, -1, 2)))) + (0.0350f * (__SBREF(__a_sb, -1, 3)))) + (0.0360f * (__SBREF(__a_sb, -1, 4)))) + (0.0370f * (__SBREF(__a_sb, 0, -4)))) + (0.0380f * (__SBREF(__a_sb, 0, -3)))) + (0.0390f * (__SBREF(__a_sb, 0, -2)))) + (0.0400f * (__SBREF(__a_sb, 0, -1)))) + (0.0410f * (__SBREF(__a_sb, 0, 1)))) + (0.0420f * (__SBREF(__a_sb, 0, 2)))) + (0.0430f * (__SBREF(__a_sb, 0, 3)))) + (0.0440f * (__SBREF(__a_sb, 0, 4)))) + (0.0450f * (__SBREF(__a_sb, 1, -4)))) + (0.0460f * (__SBREF(__a_sb, 1, -3)))) + (0.0470f * (__SBREF(__a_sb, 1, -2)))) + (0.0480f * (__SBREF(__a_sb, 1, -1)))) + (0.0490f * (__SBREF(__a_sb, 1, 0)))) + (0.0500f * (__SBREF(__a_sb, 1, 1)))) + (0.0510f * (__SBREF(__a_sb, 1, 2)))) + (0.0520f * (__SBREF(__a_sb, 1, 3)))) + (0.0530f * (__SBREF(__a_sb, 1, 4)))) + (0.0540f * (__SBREF(__a_sb, 2, -4)))) + (0.0550f * (__SBREF(__a_sb, 2, -3)))) + (0.0560f * (__SBREF(__a_sb, 2, -2)))) + (0.0570f * (__SBREF(__a_sb, 2, -1)))) + (0.0580f * (__SBREF(__a_sb, 2, 0)))) + (0.0590f * (__SBREF(__a_sb, 2, 1)))) + (0.0600f * (__SBREF(__a_sb, 2, 2)))) + (0.0610f * (__SBREF(__a_sb, 2, 3)))) + (0.0620f * (__SBREF(__a_sb, 2, 4)))) + (0.0630f * (__SBREF(__a_sb, 3, -4)))) + (0.0640f * (__SBREF(__a_sb, 3, -3)))) + (0.0650f * (__SBREF(__a_sb, 3, -2)))) + (0.0660f * (__SBREF(__a_sb, 3, -1)))) + (0.0670f * (__SBREF(__a_sb, 3, 0)))) + (0.0680f * (__SBREF(__a_sb, 3, 1)))) + (0.0690f * (__SBREF(__a_sb, 3, 2)))) + (0.0700f * (__SBREF(__a_sb, 
3, 3)))) + (0.0710f * (__SBREF(__a_sb, 3, 4)))) + (0.0720f * (__SBREF(__a_sb, 4, -4)))) + (0.0730f * (__SBREF(__a_sb, 4, -3)))) + (0.0740f * (__SBREF(__a_sb, 4, -2)))) + (0.0750f * (__SBREF(__a_sb, 4, -1)))) + (0.0760f * (__SBREF(__a_sb, 4, 0)))) + (0.0770f * (__SBREF(__a_sb, 4, 1)))) + (0.0780f * (__SBREF(__a_sb, 4, 2)))) + (0.0790f * (__SBREF(__a_sb, 4, 3)))) + (0.0800f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.248f) * (__REGREF(__a, 0, 0)))) + (0.0011f * (__SBREF(__a_sb, -4, -4)))) + (0.0021f * (__SBREF(__a_sb, -4, -3)))) + (0.0031f * (__SBREF(__a_sb, -4, -2)))) + (0.0041f * (__SBREF(__a_sb, -4, -1)))) + (0.0051f * (__SBREF(__a_sb, -4, 0)))) + (0.0061f * (__SBREF(__a_sb, -4, 1)))) + (0.0071f * (__SBREF(__a_sb, -4, 2)))) + (0.0081f * (__SBREF(__a_sb, -4, 3)))) + (0.0091f * (__SBREF(__a_sb, -4, 4)))) + (0.0101f * (__SBREF(__a_sb, -3, -4)))) + (0.0111f * (__SBREF(__a_sb, -3, -3)))) + (0.0121f * (__SBREF(__a_sb, -3, -2)))) + (0.0131f * (__SBREF(__a_sb, -3, -1)))) + (0.0141f * (__SBREF(__a_sb, -3, 0)))) + (0.0151f * (__SBREF(__a_sb, -3, 1)))) + (0.0161f * (__SBREF(__a_sb, -3, 2)))) + (0.0171f * (__SBREF(__a_sb, -3, 3)))) + (0.0181f * (__SBREF(__a_sb, -3, 4)))) + (0.0191f * (__SBREF(__a_sb, -2, -4)))) + (0.0201f * (__SBREF(__a_sb, -2, -3)))) + (0.0211f * (__SBREF(__a_sb, -2, -2)))) + (0.0221f * (__SBREF(__a_sb, -2, -1)))) + (0.0231f * (__SBREF(__a_sb, -2, 0)))) + (0.0241f * (__SBREF(__a_sb, -2, 1)))) + (0.0251f * (__SBREF(__a_sb, -2, 2)))) + (0.0261f * (__SBREF(__a_sb, -2, 3)))) + (0.0271f * (__SBREF(__a_sb, -2, 4)))) + (0.0281f * (__SBREF(__a_sb, -1, -4)))) + (0.0291f * (__SBREF(__a_sb, -1, -3)))) + (0.0301f * (__SBREF(__a_sb, -1, -2)))) + (0.0311f * (__SBREF(__a_sb, -1, -1)))) + (0.0321f * 
(__SBREF(__a_sb, -1, 0)))) + (0.0331f * (__SBREF(__a_sb, -1, 1)))) + (0.0341f * (__SBREF(__a_sb, -1, 2)))) + (0.0351f * (__SBREF(__a_sb, -1, 3)))) + (0.0361f * (__SBREF(__a_sb, -1, 4)))) + (0.0371f * (__SBREF(__a_sb, 0, -4)))) + (0.0381f * (__SBREF(__a_sb, 0, -3)))) + (0.0391f * (__SBREF(__a_sb, 0, -2)))) + (0.0401f * (__SBREF(__a_sb, 0, -1)))) + (0.0411f * (__SBREF(__a_sb, 0, 1)))) + (0.0421f * (__SBREF(__a_sb, 0, 2)))) + (0.0431f * (__SBREF(__a_sb, 0, 3)))) + (0.0441f * (__SBREF(__a_sb, 0, 4)))) + (0.0451f * (__SBREF(__a_sb, 1, -4)))) + (0.0461f * (__SBREF(__a_sb, 1, -3)))) + (0.0471f * (__SBREF(__a_sb, 1, -2)))) + (0.0481f * (__SBREF(__a_sb, 1, -1)))) + (0.0491f * (__SBREF(__a_sb, 1, 0)))) + (0.0501f * (__SBREF(__a_sb, 1, 1)))) + (0.0511f * (__SBREF(__a_sb, 1, 2)))) + (0.0521f * (__SBREF(__a_sb, 1, 3)))) + (0.0531f * (__SBREF(__a_sb, 1, 4)))) + (0.0541f * (__SBREF(__a_sb, 2, -4)))) + (0.0551f * (__SBREF(__a_sb, 2, -3)))) + (0.0561f * (__SBREF(__a_sb, 2, -2)))) + (0.0571f * (__SBREF(__a_sb, 2, -1)))) + (0.0581f * (__SBREF(__a_sb, 2, 0)))) + (0.0591f * (__SBREF(__a_sb, 2, 1)))) + (0.0601f * (__SBREF(__a_sb, 2, 2)))) + (0.0611f * (__SBREF(__a_sb, 2, 3)))) + (0.0621f * (__SBREF(__a_sb, 2, 4)))) + (0.0631f * (__SBREF(__a_sb, 3, -4)))) + (0.0641f * (__SBREF(__a_sb, 3, -3)))) + (0.0651f * (__SBREF(__a_sb, 3, -2)))) + (0.0661f * (__SBREF(__a_sb, 3, -1)))) + (0.0671f * (__SBREF(__a_sb, 3, 0)))) + (0.0681f * (__SBREF(__a_sb, 3, 1)))) + (0.0691f * (__SBREF(__a_sb, 3, 2)))) + (0.0701f * (__SBREF(__a_sb, 3, 3)))) + (0.0711f * (__SBREF(__a_sb, 3, 4)))) + (0.0721f * (__SBREF(__a_sb, 4, -4)))) + (0.0731f * (__SBREF(__a_sb, 4, -3)))) + (0.0741f * (__SBREF(__a_sb, 4, -2)))) + (0.0751f * (__SBREF(__a_sb, 4, -1)))) + (0.0761f * (__SBREF(__a_sb, 4, 0)))) + (0.0771f * (__SBREF(__a_sb, 4, 1)))) + (0.0781f * (__SBREF(__a_sb, 4, 2)))) + (0.0791f * (__SBREF(__a_sb, 4, 3)))) + (0.0801f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_1(out, a) do { double etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.256f) * (__REGREF(__a, 0, 0)))) + (0.0012f * (__SBREF(__a_sb, -4, -4)))) + (0.0022f * (__SBREF(__a_sb, -4, -3)))) + (0.0032f * (__SBREF(__a_sb, -4, -2)))) + (0.0042f * (__SBREF(__a_sb, -4, -1)))) + (0.0052f * (__SBREF(__a_sb, -4, 0)))) + (0.0062f * (__SBREF(__a_sb, -4, 1)))) + (0.0072f * (__SBREF(__a_sb, -4, 2)))) + (0.0082f * (__SBREF(__a_sb, -4, 3)))) + (0.0092f * (__SBREF(__a_sb, -4, 4)))) + (0.0102f * (__SBREF(__a_sb, -3, -4)))) + (0.0112f * (__SBREF(__a_sb, -3, -3)))) + (0.0122f * (__SBREF(__a_sb, -3, -2)))) + (0.0132f * (__SBREF(__a_sb, -3, -1)))) + (0.0142f * (__SBREF(__a_sb, -3, 0)))) + (0.0152f * (__SBREF(__a_sb, -3, 1)))) + (0.0162f * (__SBREF(__a_sb, -3, 2)))) + (0.0172f * (__SBREF(__a_sb, -3, 3)))) + (0.0182f * (__SBREF(__a_sb, -3, 4)))) + (0.0192f * (__SBREF(__a_sb, -2, -4)))) + (0.0202f * (__SBREF(__a_sb, -2, -3)))) + (0.0212f * (__SBREF(__a_sb, -2, -2)))) + (0.0222f * (__SBREF(__a_sb, -2, -1)))) + (0.0232f * (__SBREF(__a_sb, -2, 0)))) + (0.0242f * (__SBREF(__a_sb, -2, 1)))) + (0.0252f * (__SBREF(__a_sb, -2, 2)))) + (0.0262f * (__SBREF(__a_sb, -2, 3)))) + (0.0272f * (__SBREF(__a_sb, -2, 4)))) + (0.0282f * (__SBREF(__a_sb, -1, -4)))) + (0.0292f * (__SBREF(__a_sb, -1, -3)))) + (0.0302f * (__SBREF(__a_sb, -1, -2)))) + (0.0312f * (__SBREF(__a_sb, -1, -1)))) + (0.0322f * (__SBREF(__a_sb, -1, 0)))) + (0.0332f * (__SBREF(__a_sb, -1, 1)))) + (0.0342f * (__SBREF(__a_sb, -1, 2)))) + (0.0352f * (__SBREF(__a_sb, -1, 3)))) + (0.0362f * (__SBREF(__a_sb, -1, 4)))) + (0.0372f * (__SBREF(__a_sb, 0, -4)))) + (0.0382f * (__SBREF(__a_sb, 0, -3)))) + (0.0392f * (__SBREF(__a_sb, 0, -2)))) + (0.0402f * (__SBREF(__a_sb, 0, -1)))) + (0.0412f * (__SBREF(__a_sb, 0, 1)))) + (0.0422f * (__SBREF(__a_sb, 0, 2)))) + (0.0432f * (__SBREF(__a_sb, 0, 3)))) + (0.0442f * (__SBREF(__a_sb, 0, 4)))) + (0.0452f * (__SBREF(__a_sb, 1, -4)))) + (0.0462f * (__SBREF(__a_sb, 1, -3)))) + (0.0472f * (__SBREF(__a_sb, 1, -2)))) + (0.0482f * (__SBREF(__a_sb, 1, -1)))) + (0.0492f * (__SBREF(__a_sb, 1, 0)))) + (0.0502f * (__SBREF(__a_sb, 1, 1)))) + (0.0512f * (__SBREF(__a_sb, 1, 2)))) + (0.0522f * (__SBREF(__a_sb, 1, 3)))) + (0.0532f * (__SBREF(__a_sb, 1, 4)))) + (0.0542f * (__SBREF(__a_sb, 2, -4)))) + (0.0552f * (__SBREF(__a_sb, 2, -3)))) + (0.0562f * (__SBREF(__a_sb, 2, -2)))) + (0.0572f * (__SBREF(__a_sb, 2, -1)))) + (0.0582f * (__SBREF(__a_sb, 2, 0)))) + (0.0592f * (__SBREF(__a_sb, 2, 1)))) + (0.0602f * (__SBREF(__a_sb, 2, 2)))) + (0.0612f * (__SBREF(__a_sb, 2, 3)))) + (0.0622f * (__SBREF(__a_sb, 
2, 4)))) + (0.0632f * (__SBREF(__a_sb, 3, -4)))) + (0.0642f * (__SBREF(__a_sb, 3, -3)))) + (0.0652f * (__SBREF(__a_sb, 3, -2)))) + (0.0662f * (__SBREF(__a_sb, 3, -1)))) + (0.0672f * (__SBREF(__a_sb, 3, 0)))) + (0.0682f * (__SBREF(__a_sb, 3, 1)))) + (0.0692f * (__SBREF(__a_sb, 3, 2)))) + (0.0702f * (__SBREF(__a_sb, 3, 3)))) + (0.0712f * (__SBREF(__a_sb, 3, 4)))) + (0.0722f * (__SBREF(__a_sb, 4, -4)))) + (0.0732f * (__SBREF(__a_sb, 4, -3)))) + (0.0742f * (__SBREF(__a_sb, 4, -2)))) + (0.0752f * (__SBREF(__a_sb, 4, -1)))) + (0.0762f * (__SBREF(__a_sb, 4, 0)))) + (0.0772f * (__SBREF(__a_sb, 4, 1)))) + (0.0782f * (__SBREF(__a_sb, 4, 2)))) + (0.0792f * (__SBREF(__a_sb, 4, 3)))) + (0.0802f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_2(out, a) do { double etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_3_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.264f) * (__REGREF(__a, 0, 0)))) + (0.0013f * (__SBREF(__a_sb, -4, -4)))) + (0.0023f * (__SBREF(__a_sb, -4, -3)))) + (0.0033f * (__SBREF(__a_sb, -4, -2)))) + (0.0043f * (__SBREF(__a_sb, -4, -1)))) + (0.0053f * (__SBREF(__a_sb, -4, 0)))) + (0.0063f * (__SBREF(__a_sb, -4, 1)))) + (0.0073f * (__SBREF(__a_sb, -4, 2)))) + (0.0083f * (__SBREF(__a_sb, -4, 3)))) + (0.0093f * (__SBREF(__a_sb, -4, 4)))) + (0.0103f * (__SBREF(__a_sb, -3, -4)))) + (0.0113f * (__SBREF(__a_sb, -3, -3)))) + (0.0123f * (__SBREF(__a_sb, -3, -2)))) + (0.0133f * (__SBREF(__a_sb, -3, -1)))) + (0.0143f * (__SBREF(__a_sb, -3, 0)))) + (0.0153f * (__SBREF(__a_sb, -3, 1)))) + (0.0163f * (__SBREF(__a_sb, -3, 2)))) + (0.0173f * (__SBREF(__a_sb, -3, 3)))) + (0.0183f * (__SBREF(__a_sb, -3, 4)))) + (0.0193f * (__SBREF(__a_sb, -2, -4)))) + (0.0203f * (__SBREF(__a_sb, -2, -3)))) + (0.0213f * (__SBREF(__a_sb, -2, -2)))) + (0.0223f * (__SBREF(__a_sb, -2, -1)))) + (0.0233f * (__SBREF(__a_sb, -2, 0)))) + (0.0243f * (__SBREF(__a_sb, -2, 1)))) + (0.0253f * (__SBREF(__a_sb, -2, 2)))) + (0.0263f * (__SBREF(__a_sb, -2, 3)))) + (0.0273f * (__SBREF(__a_sb, -2, 4)))) + (0.0283f * (__SBREF(__a_sb, -1, -4)))) + (0.0293f * (__SBREF(__a_sb, -1, -3)))) + (0.0303f * (__SBREF(__a_sb, -1, -2)))) + (0.0313f * (__SBREF(__a_sb, -1, -1)))) 
+ (0.0323f * (__SBREF(__a_sb, -1, 0)))) + (0.0333f * (__SBREF(__a_sb, -1, 1)))) + (0.0343f * (__SBREF(__a_sb, -1, 2)))) + (0.0353f * (__SBREF(__a_sb, -1, 3)))) + (0.0363f * (__SBREF(__a_sb, -1, 4)))) + (0.0373f * (__SBREF(__a_sb, 0, -4)))) + (0.0383f * (__SBREF(__a_sb, 0, -3)))) + (0.0393f * (__SBREF(__a_sb, 0, -2)))) + (0.0403f * (__SBREF(__a_sb, 0, -1)))) + (0.0413f * (__SBREF(__a_sb, 0, 1)))) + (0.0423f * (__SBREF(__a_sb, 0, 2)))) + (0.0433f * (__SBREF(__a_sb, 0, 3)))) + (0.0443f * (__SBREF(__a_sb, 0, 4)))) + (0.0453f * (__SBREF(__a_sb, 1, -4)))) + (0.0463f * (__SBREF(__a_sb, 1, -3)))) + (0.0473f * (__SBREF(__a_sb, 1, -2)))) + (0.0483f * (__SBREF(__a_sb, 1, -1)))) + (0.0493f * (__SBREF(__a_sb, 1, 0)))) + (0.0503f * (__SBREF(__a_sb, 1, 1)))) + (0.0513f * (__SBREF(__a_sb, 1, 2)))) + (0.0523f * (__SBREF(__a_sb, 1, 3)))) + (0.0533f * (__SBREF(__a_sb, 1, 4)))) + (0.0543f * (__SBREF(__a_sb, 2, -4)))) + (0.0553f * (__SBREF(__a_sb, 2, -3)))) + (0.0563f * (__SBREF(__a_sb, 2, -2)))) + (0.0573f * (__SBREF(__a_sb, 2, -1)))) + (0.0583f * (__SBREF(__a_sb, 2, 0)))) + (0.0593f * (__SBREF(__a_sb, 2, 1)))) + (0.0603f * (__SBREF(__a_sb, 2, 2)))) + (0.0613f * (__SBREF(__a_sb, 2, 3)))) + (0.0623f * (__SBREF(__a_sb, 2, 4)))) + (0.0633f * (__SBREF(__a_sb, 3, -4)))) + (0.0643f * (__SBREF(__a_sb, 3, -3)))) + (0.0653f * (__SBREF(__a_sb, 3, -2)))) + (0.0663f * (__SBREF(__a_sb, 3, -1)))) + (0.0673f * (__SBREF(__a_sb, 3, 0)))) + (0.0683f * (__SBREF(__a_sb, 3, 1)))) + (0.0693f * (__SBREF(__a_sb, 3, 2)))) + (0.0703f * (__SBREF(__a_sb, 3, 3)))) + (0.0713f * (__SBREF(__a_sb, 3, 4)))) + (0.0723f * (__SBREF(__a_sb, 4, -4)))) + (0.0733f * (__SBREF(__a_sb, 4, -3)))) + (0.0743f * (__SBREF(__a_sb, 4, -2)))) + (0.0753f * (__SBREF(__a_sb, 4, -1)))) + (0.0763f * (__SBREF(__a_sb, 4, 0)))) + (0.0773f * (__SBREF(__a_sb, 4, 1)))) + (0.0783f * (__SBREF(__a_sb, 4, 2)))) + (0.0793f * (__SBREF(__a_sb, 4, 3)))) + (0.0803f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_3(out, a) do { double etmp; __CALCEXPR_3_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_4_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.272f) * (__REGREF(__a, 0, 0)))) + (0.0014f * (__SBREF(__a_sb, -4, -4)))) + (0.0024f * (__SBREF(__a_sb, -4, -3)))) + (0.0034f * (__SBREF(__a_sb, -4, -2)))) + (0.0044f * (__SBREF(__a_sb, -4, -1)))) + (0.0054f * (__SBREF(__a_sb, -4, 0)))) + (0.0064f * (__SBREF(__a_sb, -4, 1)))) + (0.0074f * (__SBREF(__a_sb, -4, 2)))) + (0.0084f * (__SBREF(__a_sb, -4, 3)))) + (0.0094f * (__SBREF(__a_sb, -4, 4)))) + (0.0104f * (__SBREF(__a_sb, -3, -4)))) + (0.0114f * (__SBREF(__a_sb, -3, -3)))) + (0.0124f * (__SBREF(__a_sb, -3, -2)))) + (0.0134f * (__SBREF(__a_sb, -3, -1)))) + (0.0144f * (__SBREF(__a_sb, -3, 0)))) + (0.0154f * (__SBREF(__a_sb, -3, 1)))) + (0.0164f * (__SBREF(__a_sb, -3, 2)))) + (0.0174f * (__SBREF(__a_sb, -3, 3)))) + (0.0184f * (__SBREF(__a_sb, -3, 4)))) + (0.0194f * (__SBREF(__a_sb, -2, -4)))) + (0.0204f * (__SBREF(__a_sb, -2, -3)))) + (0.0214f * (__SBREF(__a_sb, -2, -2)))) + (0.0224f * (__SBREF(__a_sb, -2, -1)))) + (0.0234f * (__SBREF(__a_sb, -2, 0)))) + (0.0244f * (__SBREF(__a_sb, -2, 1)))) + (0.0254f * (__SBREF(__a_sb, -2, 2)))) + (0.0264f * (__SBREF(__a_sb, -2, 3)))) + (0.0274f * (__SBREF(__a_sb, -2, 4)))) + (0.0284f * (__SBREF(__a_sb, -1, -4)))) + (0.0294f * (__SBREF(__a_sb, -1, -3)))) + (0.0304f * (__SBREF(__a_sb, -1, -2)))) + (0.0314f * (__SBREF(__a_sb, -1, -1)))) + (0.0324f * (__SBREF(__a_sb, -1, 0)))) + (0.0334f * (__SBREF(__a_sb, -1, 1)))) + (0.0344f * (__SBREF(__a_sb, -1, 2)))) + (0.0354f * (__SBREF(__a_sb, -1, 3)))) + (0.0364f * (__SBREF(__a_sb, -1, 4)))) + (0.0374f * (__SBREF(__a_sb, 0, -4)))) + (0.0384f * (__SBREF(__a_sb, 0, -3)))) + (0.0394f * (__SBREF(__a_sb, 0, -2)))) + (0.0404f * (__SBREF(__a_sb, 0, -1)))) + (0.0414f * (__SBREF(__a_sb, 0, 1)))) + (0.0424f * (__SBREF(__a_sb, 0, 2)))) + (0.0434f * (__SBREF(__a_sb, 0, 3)))) + (0.0444f * (__SBREF(__a_sb, 0, 4)))) + (0.0454f * (__SBREF(__a_sb, 1, -4)))) + (0.0464f * (__SBREF(__a_sb, 1, -3)))) + (0.0474f * (__SBREF(__a_sb, 1, -2)))) + (0.0484f * (__SBREF(__a_sb, 1, -1)))) + (0.0494f * (__SBREF(__a_sb, 1, 0)))) + (0.0504f * (__SBREF(__a_sb, 1, 1)))) + (0.0514f * (__SBREF(__a_sb, 1, 2)))) + (0.0524f * (__SBREF(__a_sb, 1, 3)))) + (0.0534f * (__SBREF(__a_sb, 1, 4)))) + (0.0544f * (__SBREF(__a_sb, 2, -4)))) + (0.0554f * (__SBREF(__a_sb, 2, -3)))) + (0.0564f * (__SBREF(__a_sb, 2, -2)))) + (0.0574f * (__SBREF(__a_sb, 2, -1)))) + (0.0584f * (__SBREF(__a_sb, 2, 0)))) + (0.0594f * (__SBREF(__a_sb, 2, 1)))) + (0.0604f * (__SBREF(__a_sb, 2, 2)))) + (0.0614f * (__SBREF(__a_sb, 2, 3)))) + (0.0624f * (__SBREF(__a_sb, 2, 4)))) + (0.0634f * (__SBREF(__a_sb, 3, -4)))) + (0.0644f * (__SBREF(__a_sb, 3, -3)))) + (0.0654f * (__SBREF(__a_sb, 3, -2)))) + (0.0664f * (__SBREF(__a_sb, 3, 
-1)))) + (0.0674f * (__SBREF(__a_sb, 3, 0)))) + (0.0684f * (__SBREF(__a_sb, 3, 1)))) + (0.0694f * (__SBREF(__a_sb, 3, 2)))) + (0.0704f * (__SBREF(__a_sb, 3, 3)))) + (0.0714f * (__SBREF(__a_sb, 3, 4)))) + (0.0724f * (__SBREF(__a_sb, 4, -4)))) + (0.0734f * (__SBREF(__a_sb, 4, -3)))) + (0.0744f * (__SBREF(__a_sb, 4, -2)))) + (0.0754f * (__SBREF(__a_sb, 4, -1)))) + (0.0764f * (__SBREF(__a_sb, 4, 0)))) + (0.0774f * (__SBREF(__a_sb, 4, 1)))) + (0.0784f * (__SBREF(__a_sb, 4, 2)))) + (0.0794f * (__SBREF(__a_sb, 4, 3)))) + (0.0804f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_4(out, a) do { double etmp; __CALCEXPR_4_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_5_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.280f) * (__REGREF(__a, 0, 0)))) + (0.0015f * (__SBREF(__a_sb, -4, -4)))) + (0.0025f * (__SBREF(__a_sb, -4, -3)))) + (0.0035f * (__SBREF(__a_sb, -4, -2)))) + (0.0045f * (__SBREF(__a_sb, -4, -1)))) + (0.0055f * (__SBREF(__a_sb, -4, 0)))) + (0.0065f * (__SBREF(__a_sb, -4, 1)))) + (0.0075f * (__SBREF(__a_sb, -4, 2)))) + (0.0085f * (__SBREF(__a_sb, -4, 3)))) + (0.0095f * (__SBREF(__a_sb, -4, 4)))) + (0.0105f * (__SBREF(__a_sb, -3, -4)))) + (0.0115f * (__SBREF(__a_sb, -3, -3)))) + (0.0125f * (__SBREF(__a_sb, -3, -2)))) + (0.0135f * (__SBREF(__a_sb, -3, -1)))) + (0.0145f * (__SBREF(__a_sb, -3, 0)))) + (0.0155f * (__SBREF(__a_sb, -3, 1)))) + (0.0165f * (__SBREF(__a_sb, -3, 2)))) + (0.0175f * (__SBREF(__a_sb, -3, 3)))) + (0.0185f * (__SBREF(__a_sb, -3, 4)))) + (0.0195f * (__SBREF(__a_sb, -2, -4)))) + (0.0205f * (__SBREF(__a_sb, -2, -3)))) + (0.0215f * (__SBREF(__a_sb, -2, -2)))) + (0.0225f * (__SBREF(__a_sb, -2, -1)))) + (0.0235f * (__SBREF(__a_sb, -2, 0)))) + (0.0245f * (__SBREF(__a_sb, -2, 1)))) + (0.0255f * (__SBREF(__a_sb, -2, 2)))) + (0.0265f * (__SBREF(__a_sb, -2, 3)))) + (0.0275f * (__SBREF(__a_sb, -2, 4)))) + (0.0285f * (__SBREF(__a_sb, -1, -4)))) + (0.0295f * (__SBREF(__a_sb, -1, -3)))) + (0.0305f * (__SBREF(__a_sb, -1, -2)))) + (0.0315f * (__SBREF(__a_sb, -1, -1)))) + (0.0325f * (__SBREF(__a_sb, -1, 0)))) + (0.0335f * (__SBREF(__a_sb, -1, 1)))) + (0.0345f * (__SBREF(__a_sb, -1, 2)))) + (0.0355f * (__SBREF(__a_sb, -1, 3)))) + (0.0365f * (__SBREF(__a_sb, -1, 4)))) + (0.0375f * (__SBREF(__a_sb, 0, -4)))) + (0.0385f * (__SBREF(__a_sb, 0, -3)))) + (0.0395f * (__SBREF(__a_sb, 0, -2)))) + (0.0405f * (__SBREF(__a_sb, 0, -1)))) + (0.0415f * (__SBREF(__a_sb, 0, 1)))) + (0.0425f * (__SBREF(__a_sb, 0, 2)))) + (0.0435f * (__SBREF(__a_sb, 0, 3)))) + 
(0.0445f * (__SBREF(__a_sb, 0, 4)))) + (0.0455f * (__SBREF(__a_sb, 1, -4)))) + (0.0465f * (__SBREF(__a_sb, 1, -3)))) + (0.0475f * (__SBREF(__a_sb, 1, -2)))) + (0.0485f * (__SBREF(__a_sb, 1, -1)))) + (0.0495f * (__SBREF(__a_sb, 1, 0)))) + (0.0505f * (__SBREF(__a_sb, 1, 1)))) + (0.0515f * (__SBREF(__a_sb, 1, 2)))) + (0.0525f * (__SBREF(__a_sb, 1, 3)))) + (0.0535f * (__SBREF(__a_sb, 1, 4)))) + (0.0545f * (__SBREF(__a_sb, 2, -4)))) + (0.0555f * (__SBREF(__a_sb, 2, -3)))) + (0.0565f * (__SBREF(__a_sb, 2, -2)))) + (0.0575f * (__SBREF(__a_sb, 2, -1)))) + (0.0585f * (__SBREF(__a_sb, 2, 0)))) + (0.0595f * (__SBREF(__a_sb, 2, 1)))) + (0.0605f * (__SBREF(__a_sb, 2, 2)))) + (0.0615f * (__SBREF(__a_sb, 2, 3)))) + (0.0625f * (__SBREF(__a_sb, 2, 4)))) + (0.0635f * (__SBREF(__a_sb, 3, -4)))) + (0.0645f * (__SBREF(__a_sb, 3, -3)))) + (0.0655f * (__SBREF(__a_sb, 3, -2)))) + (0.0665f * (__SBREF(__a_sb, 3, -1)))) + (0.0675f * (__SBREF(__a_sb, 3, 0)))) + (0.0685f * (__SBREF(__a_sb, 3, 1)))) + (0.0695f * (__SBREF(__a_sb, 3, 2)))) + (0.0705f * (__SBREF(__a_sb, 3, 3)))) + (0.0715f * (__SBREF(__a_sb, 3, 4)))) + (0.0725f * (__SBREF(__a_sb, 4, -4)))) + (0.0735f * (__SBREF(__a_sb, 4, -3)))) + (0.0745f * (__SBREF(__a_sb, 4, -2)))) + (0.0755f * (__SBREF(__a_sb, 4, -1)))) + (0.0765f * (__SBREF(__a_sb, 4, 0)))) + (0.0775f * (__SBREF(__a_sb, 4, 1)))) + (0.0785f * (__SBREF(__a_sb, 4, 2)))) + (0.0795f * (__SBREF(__a_sb, 4, 3)))) + (0.0805f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? 
__blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_5(out, a) do { double etmp; __CALCEXPR_5_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_6_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.288f) * (__REGREF(__a, 0, 0)))) + (0.0016f * (__SBREF(__a_sb, -4, -4)))) + (0.0026f * (__SBREF(__a_sb, -4, -3)))) + (0.0036f * (__SBREF(__a_sb, -4, -2)))) + (0.0046f * (__SBREF(__a_sb, -4, -1)))) + (0.0056f * (__SBREF(__a_sb, -4, 0)))) + (0.0066f * (__SBREF(__a_sb, -4, 1)))) + (0.0076f * (__SBREF(__a_sb, -4, 2)))) + (0.0086f * (__SBREF(__a_sb, -4, 3)))) + (0.0096f * (__SBREF(__a_sb, -4, 4)))) + (0.0106f * (__SBREF(__a_sb, -3, -4)))) + (0.0116f * (__SBREF(__a_sb, -3, -3)))) + (0.0126f * (__SBREF(__a_sb, -3, -2)))) + (0.0136f * (__SBREF(__a_sb, -3, -1)))) + (0.0146f * (__SBREF(__a_sb, -3, 0)))) + (0.0156f * (__SBREF(__a_sb, -3, 1)))) + (0.0166f * (__SBREF(__a_sb, -3, 2)))) + (0.0176f * (__SBREF(__a_sb, -3, 3)))) + (0.0186f * (__SBREF(__a_sb, -3, 4)))) + (0.0196f * (__SBREF(__a_sb, -2, -4)))) + (0.0206f * (__SBREF(__a_sb, -2, -3)))) + (0.0216f * (__SBREF(__a_sb, -2, -2)))) + (0.0226f * (__SBREF(__a_sb, -2, -1)))) + (0.0236f * (__SBREF(__a_sb, -2, 0)))) + (0.0246f * (__SBREF(__a_sb, -2, 1)))) + (0.0256f * (__SBREF(__a_sb, -2, 2)))) + (0.0266f * (__SBREF(__a_sb, -2, 3)))) + (0.0276f * (__SBREF(__a_sb, -2, 4)))) + (0.0286f * (__SBREF(__a_sb, -1, -4)))) + (0.0296f * (__SBREF(__a_sb, -1, -3)))) + (0.0306f * (__SBREF(__a_sb, -1, -2)))) + (0.0316f * (__SBREF(__a_sb, -1, -1)))) + (0.0326f * (__SBREF(__a_sb, -1, 0)))) + (0.0336f * (__SBREF(__a_sb, -1, 1)))) + (0.0346f * (__SBREF(__a_sb, -1, 2)))) + (0.0356f * (__SBREF(__a_sb, -1, 3)))) + (0.0366f * (__SBREF(__a_sb, -1, 4)))) + (0.0376f * (__SBREF(__a_sb, 0, -4)))) + (0.0386f * (__SBREF(__a_sb, 0, -3)))) + (0.0396f * (__SBREF(__a_sb, 0, -2)))) + (0.0406f * (__SBREF(__a_sb, 0, -1)))) + (0.0416f * (__SBREF(__a_sb, 0, 1)))) + (0.0426f * (__SBREF(__a_sb, 0, 2)))) + (0.0436f * (__SBREF(__a_sb, 0, 3)))) + (0.0446f * (__SBREF(__a_sb, 0, 4)))) + (0.0456f * (__SBREF(__a_sb, 1, -4)))) + (0.0466f * (__SBREF(__a_sb, 1, -3)))) + (0.0476f * (__SBREF(__a_sb, 1, -2)))) + (0.0486f * (__SBREF(__a_sb, 1, -1)))) + (0.0496f * (__SBREF(__a_sb, 1, 0)))) + (0.0506f * (__SBREF(__a_sb, 1, 1)))) + (0.0516f * (__SBREF(__a_sb, 1, 2)))) + (0.0526f * (__SBREF(__a_sb, 1, 3)))) + (0.0536f * (__SBREF(__a_sb, 1, 4)))) + (0.0546f * (__SBREF(__a_sb, 2, -4)))) + (0.0556f * (__SBREF(__a_sb, 2, -3)))) + (0.0566f * (__SBREF(__a_sb, 2, -2)))) + (0.0576f * (__SBREF(__a_sb, 2, -1)))) + (0.0586f * (__SBREF(__a_sb, 2, 0)))) + (0.0596f * (__SBREF(__a_sb, 2, 1)))) + (0.0606f * (__SBREF(__a_sb, 2, 2)))) + (0.0616f * (__SBREF(__a_sb, 2, 3)))) + (0.0626f * (__SBREF(__a_sb, 2, 4)))) + (0.0636f * (__SBREF(__a_sb, 3, -4)))) + (0.0646f * (__SBREF(__a_sb, 3, -3)))) + (0.0656f * (__SBREF(__a_sb, 3, -2)))) + (0.0666f * (__SBREF(__a_sb, 3, -1)))) + (0.0676f * (__SBREF(__a_sb, 3, 0)))) + (0.0686f * (__SBREF(__a_sb, 3, 1)))) + (0.0696f * (__SBREF(__a_sb, 3, 2)))) + (0.0706f * (__SBREF(__a_sb, 3, 
3)))) + (0.0716f * (__SBREF(__a_sb, 3, 4)))) + (0.0726f * (__SBREF(__a_sb, 4, -4)))) + (0.0736f * (__SBREF(__a_sb, 4, -3)))) + (0.0746f * (__SBREF(__a_sb, 4, -2)))) + (0.0756f * (__SBREF(__a_sb, 4, -1)))) + (0.0766f * (__SBREF(__a_sb, 4, 0)))) + (0.0776f * (__SBREF(__a_sb, 4, 1)))) + (0.0786f * (__SBREF(__a_sb, 4, 2)))) + (0.0796f * (__SBREF(__a_sb, 4, 3)))) + (0.0806f * (__SBREF(__a_sb, 4, 4)))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_6(out, a) do { double etmp; __CALCEXPR_6_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_7_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.296f) * (__REGREF(__a, 0, 0)))) + (0.0017f * (__SBREF(__a_sb, -4, -4)))) + (0.0027f * (__SBREF(__a_sb, -4, -3)))) + (0.0037f * (__SBREF(__a_sb, -4, -2)))) + (0.0047f * (__SBREF(__a_sb, -4, -1)))) + (0.0057f * (__SBREF(__a_sb, -4, 0)))) + (0.0067f * (__SBREF(__a_sb, -4, 1)))) + (0.0077f * (__SBREF(__a_sb, -4, 2)))) + (0.0087f * (__SBREF(__a_sb, -4, 3)))) + (0.0097f * (__SBREF(__a_sb, -4, 4)))) + (0.0107f * (__SBREF(__a_sb, -3, -4)))) + (0.0117f * (__SBREF(__a_sb, -3, -3)))) + (0.0127f * (__SBREF(__a_sb, -3, -2)))) + (0.0137f * (__SBREF(__a_sb, -3, -1)))) + (0.0147f * (__SBREF(__a_sb, -3, 0)))) + (0.0157f * (__SBREF(__a_sb, -3, 1)))) + (0.0167f * (__SBREF(__a_sb, -3, 2)))) + (0.0177f * (__SBREF(__a_sb, -3, 3)))) + (0.0187f * (__SBREF(__a_sb, -3, 4)))) + (0.0197f * (__SBREF(__a_sb, -2, -4)))) + (0.0207f * (__SBREF(__a_sb, -2, -3)))) + (0.0217f * (__SBREF(__a_sb, -2, -2)))) + (0.0227f * (__SBREF(__a_sb, -2, -1)))) + (0.0237f * (__SBREF(__a_sb, -2, 0)))) + (0.0247f * (__SBREF(__a_sb, -2, 1)))) + (0.0257f * (__SBREF(__a_sb, -2, 2)))) + (0.0267f * (__SBREF(__a_sb, -2, 3)))) + (0.0277f * (__SBREF(__a_sb, -2, 4)))) + (0.0287f * (__SBREF(__a_sb, -1, -4)))) + (0.0297f * (__SBREF(__a_sb, -1, -3)))) + (0.0307f * (__SBREF(__a_sb, -1, -2)))) + (0.0317f * (__SBREF(__a_sb, -1, -1)))) + (0.0327f * (__SBREF(__a_sb, -1, 0)))) + (0.0337f * (__SBREF(__a_sb, -1, 1)))) + (0.0347f * (__SBREF(__a_sb, -1, 2)))) + (0.0357f * (__SBREF(__a_sb, -1, 3)))) + (0.0367f * (__SBREF(__a_sb, -1, 4)))) + (0.0377f * (__SBREF(__a_sb, 0, -4)))) + (0.0387f * (__SBREF(__a_sb, 0, -3)))) + (0.0397f * (__SBREF(__a_sb, 0, -2)))) + (0.0407f * (__SBREF(__a_sb, 0, -1)))) + (0.0417f * (__SBREF(__a_sb, 0, 1)))) + (0.0427f * (__SBREF(__a_sb, 0, 2)))) + (0.0437f * (__SBREF(__a_sb, 0, 3)))) + (0.0447f * (__SBREF(__a_sb, 0, 4)))) + (0.0457f * (__SBREF(__a_sb, 1, -4)))) + (0.0467f * (__SBREF(__a_sb, 1, -3)))) + (0.0477f * (__SBREF(__a_sb, 1, -2)))) + (0.0487f * (__SBREF(__a_sb, 1, -1)))) + (0.0497f * (__SBREF(__a_sb, 1, 0)))) + (0.0507f * (__SBREF(__a_sb, 1, 1)))) + (0.0517f * (__SBREF(__a_sb, 1, 2)))) + (0.0527f * (__SBREF(__a_sb, 1, 3)))) + (0.0537f * (__SBREF(__a_sb, 1, 4)))) + (0.0547f * (__SBREF(__a_sb, 2, -4)))) + (0.0557f * (__SBREF(__a_sb, 2, -3)))) + (0.0567f * 
(__SBREF(__a_sb, 2, -2)))) + (0.0577f * (__SBREF(__a_sb, 2, -1)))) + (0.0587f * (__SBREF(__a_sb, 2, 0)))) + (0.0597f * (__SBREF(__a_sb, 2, 1)))) + (0.0607f * (__SBREF(__a_sb, 2, 2)))) + (0.0617f * (__SBREF(__a_sb, 2, 3)))) + (0.0627f * (__SBREF(__a_sb, 2, 4)))) + (0.0637f * (__SBREF(__a_sb, 3, -4)))) + (0.0647f * (__SBREF(__a_sb, 3, -3)))) + (0.0657f * (__SBREF(__a_sb, 3, -2)))) + (0.0667f * (__SBREF(__a_sb, 3, -1)))) + (0.0677f * (__SBREF(__a_sb, 3, 0)))) + (0.0687f * (__SBREF(__a_sb, 3, 1)))) + (0.0697f * (__SBREF(__a_sb, 3, 2)))) + (0.0707f * (__SBREF(__a_sb, 3, 3)))) + (0.0717f * (__SBREF(__a_sb, 3, 4)))) + (0.0727f * (__SBREF(__a_sb, 4, -4)))) + (0.0737f * (__SBREF(__a_sb, 4, -3)))) + (0.0747f * (__SBREF(__a_sb, 4, -2)))) + (0.0757f * (__SBREF(__a_sb, 4, -1)))) + (0.0767f * (__SBREF(__a_sb, 4, 0)))) + (0.0777f * (__SBREF(__a_sb, 4, 1)))) + (0.0787f * (__SBREF(__a_sb, 4, 2)))) + (0.0797f * (__SBREF(__a_sb, 4, 3)))) + (0.0807f * (__SBREF(__a_sb, 4, 4))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_7(out, a) do { double etmp; __CALCEXPR_7_wrap(etmp, a); out += etmp; } while (0); #define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3]) #define __REGREF(reg, i2, i3) reg #define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3) #define __CALCEXPR_8_wrap(__rn0, __a) do { __rn0 = (((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((((-3.304f) * (__REGREF(__a, 0, 0)))) + (0.0018f * (__SBREF(__a_sb, -4, -4)))) + (0.0028f * (__SBREF(__a_sb, -4, -3)))) + (0.0038f * (__SBREF(__a_sb, -4, -2)))) + (0.0048f * (__SBREF(__a_sb, -4, -1)))) + (0.0058f * (__SBREF(__a_sb, -4, 0)))) + (0.0068f * (__SBREF(__a_sb, -4, 1)))) + (0.0078f * (__SBREF(__a_sb, -4, 2)))) + (0.0088f * (__SBREF(__a_sb, -4, 3)))) + (0.0098f * (__SBREF(__a_sb, -4, 4)))) + (0.0108f * (__SBREF(__a_sb, -3, -4)))) + (0.0118f * (__SBREF(__a_sb, -3, -3)))) + (0.0128f * (__SBREF(__a_sb, -3, -2)))) + (0.0138f * (__SBREF(__a_sb, -3, -1)))) + (0.0148f * (__SBREF(__a_sb, -3, 0)))) + (0.0158f * (__SBREF(__a_sb, -3, 1)))) + (0.0168f * (__SBREF(__a_sb, -3, 2)))) + (0.0178f * (__SBREF(__a_sb, -3, 3)))) + (0.0188f * (__SBREF(__a_sb, -3, 4)))) + (0.0198f * (__SBREF(__a_sb, -2, -4)))) + (0.0208f * (__SBREF(__a_sb, -2, -3)))) + (0.0218f * (__SBREF(__a_sb, -2, -2)))) + (0.0228f * (__SBREF(__a_sb, -2, -1)))) + (0.0238f * (__SBREF(__a_sb, -2, 0)))) + (0.0248f * (__SBREF(__a_sb, -2, 1)))) + (0.0258f * (__SBREF(__a_sb, -2, 2)))) + (0.0268f * (__SBREF(__a_sb, -2, 3)))) + (0.0278f * (__SBREF(__a_sb, -2, 4)))) + (0.0288f * (__SBREF(__a_sb, -1, -4)))) + (0.0298f * (__SBREF(__a_sb, -1, -3)))) + (0.0308f * (__SBREF(__a_sb, -1, -2)))) + (0.0318f * (__SBREF(__a_sb, -1, -1)))) + (0.0328f * (__SBREF(__a_sb, -1, 0)))) + (0.0338f * (__SBREF(__a_sb, -1, 1)))) + (0.0348f * (__SBREF(__a_sb, -1, 2)))) + (0.0358f * (__SBREF(__a_sb, -1, 3)))) + (0.0368f * (__SBREF(__a_sb, -1, 4)))) + (0.0378f * (__SBREF(__a_sb, 0, -4)))) + (0.0388f * (__SBREF(__a_sb, 0, -3)))) + (0.0398f * (__SBREF(__a_sb, 0, -2)))) + (0.0408f * (__SBREF(__a_sb, 0, -1)))) + (0.0418f * (__SBREF(__a_sb, 0, 1)))) + (0.0428f * (__SBREF(__a_sb, 0, 2)))) + (0.0438f * (__SBREF(__a_sb, 0, 3)))) + (0.0448f * (__SBREF(__a_sb, 0, 4)))) + (0.0458f * (__SBREF(__a_sb, 1, -4)))) + 
(0.0468f * (__SBREF(__a_sb, 1, -3)))) + (0.0478f * (__SBREF(__a_sb, 1, -2)))) + (0.0488f * (__SBREF(__a_sb, 1, -1)))) + (0.0498f * (__SBREF(__a_sb, 1, 0)))) + (0.0508f * (__SBREF(__a_sb, 1, 1)))) + (0.0518f * (__SBREF(__a_sb, 1, 2)))) + (0.0528f * (__SBREF(__a_sb, 1, 3)))) + (0.0538f * (__SBREF(__a_sb, 1, 4)))) + (0.0548f * (__SBREF(__a_sb, 2, -4)))) + (0.0558f * (__SBREF(__a_sb, 2, -3)))) + (0.0568f * (__SBREF(__a_sb, 2, -2)))) + (0.0578f * (__SBREF(__a_sb, 2, -1)))) + (0.0588f * (__SBREF(__a_sb, 2, 0)))) + (0.0598f * (__SBREF(__a_sb, 2, 1)))) + (0.0608f * (__SBREF(__a_sb, 2, 2)))) + (0.0618f * (__SBREF(__a_sb, 2, 3)))) + (0.0628f * (__SBREF(__a_sb, 2, 4)))) + (0.0638f * (__SBREF(__a_sb, 3, -4)))) + (0.0648f * (__SBREF(__a_sb, 3, -3)))) + (0.0658f * (__SBREF(__a_sb, 3, -2)))) + (0.0668f * (__SBREF(__a_sb, 3, -1)))) + (0.0678f * (__SBREF(__a_sb, 3, 0)))) + (0.0688f * (__SBREF(__a_sb, 3, 1)))) + (0.0698f * (__SBREF(__a_sb, 3, 2)))) + (0.0708f * (__SBREF(__a_sb, 3, 3)))) + (0.0718f * (__SBREF(__a_sb, 3, 4)))) + (0.0728f * (__SBREF(__a_sb, 4, -4)))) + (0.0738f * (__SBREF(__a_sb, 4, -3)))) + (0.0748f * (__SBREF(__a_sb, 4, -2)))) + (0.0758f * (__SBREF(__a_sb, 4, -1)))) + (0.0768f * (__SBREF(__a_sb, 4, 0)))) + (0.0778f * (__SBREF(__a_sb, 4, 1)))) + (0.0788f * (__SBREF(__a_sb, 4, 2)))) + (0.0798f * (__SBREF(__a_sb, 4, 3)))) + (0.0808f * (__SBREF(__a_sb, 4, 4)))); } while (0) #define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0) #define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0) #define __CALCEXPR_8(out, a) do { double etmp; __CALCEXPR_8_wrap(etmp, a); out += etmp; } while (0); #define __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); __CALCEXPR_3(out3, reg); __CALCEXPR_4(out4, reg); __CALCEXPR_5(out5, reg); __CALCEXPR_6(out6, reg); __CALCEXPR_7(out7, reg); __CALCEXPR_8(out8, reg); } while (0); #define __CALC1(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, out3, out4, out5, out6, out7, out8, reg); } else out4 = reg; } while (0) #define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0) if (__c1Id == 0) { __LOAD(__reg_0, 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, 
__reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(4, __reg_1_4); } else { __LOAD(__reg_0, 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __LOAD(__reg_0, 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __LOAD(__reg_0, 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __LOAD(__reg_0, 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __LOAD(__reg_0, 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __LOAD(__reg_0, 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __LOAD(__reg_0, 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __LOAD(__reg_0, 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __LOAD(__reg_0, 8); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(4, __reg_1_4); } __a_sb = __a_sb_double + __blockSize * 1; if (__c1Id == __side1Num - 1) { for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 13;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; __DB_SWITCH(); __syncthreads(); } if (0) {} else if (__h + 4 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); 
__CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); } else if (__h + 5 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); } else if (__h + 6 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); } else if (__h + 7 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_7, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, 
__reg_1_1, __reg_1_1, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); } else if (__h + 8 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_8, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); } else if (__h + 9 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_0, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); } else if (__h + 10 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); 
__LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_1, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); } else if (__h + 11 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_2, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); __LOAD(__reg_0, __h + 10); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_6, __reg_0); __STORE(__h + 6, __reg_1_6); } else if (__h + 12 == __c1Len - __side1Len * __c1Id + __halo1 * 2) { __LOAD(__reg_0, __h + 0); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); 
__STORE(__h - 4, __reg_1_5); __LOAD(__reg_0, __h + 1); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 3, __reg_1_6); __LOAD(__reg_0, __h + 2); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 2, __reg_1_7); __LOAD(__reg_0, __h + 3); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 1, __reg_1_8); __LOAD(__reg_0, __h + 4); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h + 0, __reg_1_0); __LOAD(__reg_0, __h + 5); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h + 1, __reg_1_1); __LOAD(__reg_0, __h + 6); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h + 2, __reg_1_2); __LOAD(__reg_0, __h + 7); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h + 3, __reg_1_3); __LOAD(__reg_0, __h + 8); __CALC1(__reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_3, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h + 4, __reg_1_4); __LOAD(__reg_0, __h + 9); __CALC1(__reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_4, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h + 5, __reg_1_5); __LOAD(__reg_0, __h + 10); __CALC1(__reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_5, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h + 6, __reg_1_6); __LOAD(__reg_0, __h + 11); __CALC1(__reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_6, __reg_1_7, __reg_0); __STORE(__h + 7, __reg_1_7); } } else { for (__h = 9; __h <= __side1LenOl - 9;) { __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; 
__DB_SWITCH(); __syncthreads(); } if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_0); __STORE(__h - 4, __reg_1_5); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_0); __STORE(__h - 4, __reg_1_6); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_0); __STORE(__h - 4, __reg_1_7); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_0); __STORE(__h - 4, __reg_1_8); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_0); __STORE(__h - 4, __reg_1_0); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_1_1, __reg_0); __STORE(__h - 4, __reg_1_1); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_1_2, __reg_0); __STORE(__h - 4, __reg_1_2); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_1_3, __reg_0); __STORE(__h - 4, __reg_1_3); __h++; if (__h == __side1LenOl) return; __LOAD(__reg_0, __h); __CALC1(__reg_1_3, __reg_1_2, __reg_1_1, __reg_1_0, __reg_1_8, __reg_1_7, __reg_1_6, __reg_1_5, __reg_1_4, __reg_0); __STORE(__h - 4, __reg_1_4); __h++; } }
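The macro-heavy loop above is generated code for a radius-4 (9-point) stencil pipeline: each step __LOADs one new element, rotates it through nine named registers, and __STOREs a result four positions behind the read cursor, with separate prologue/epilogue branches for the boundaries. Below is a minimal hand-written sketch of that register-queue pattern. It is illustrative only: the kernel name, the row-per-thread layout, and the plain averaging stand in for whatever __CALC1 actually computes in the generated source.

__global__ void row_stencil9(const float *in, float *out, int rows, int cols)
{
  const int RAD = 4;                       // radius-4 -> 9-point window
  int row = blockIdx.x * blockDim.x + threadIdx.x;
  if (row >= rows || cols < 2 * RAD + 1) return;
  const float *rin  = in  + (size_t)row * cols;
  float       *rout = out + (size_t)row * cols;
  float w[2 * RAD + 1];                    // plays the role of the nine __reg_1_* registers
  for (int i = 0; i < 2 * RAD; ++i)        // prologue: prime the window
    w[i] = rin[i];
  for (int x = 2 * RAD; x < cols; ++x) {
    w[2 * RAD] = rin[x];                   // __LOAD(__reg_0, __h)
    float acc = 0.f;                       // __CALC1: a plain average as a stand-in
    for (int i = 0; i <= 2 * RAD; ++i)
      acc += w[i];
    rout[x - RAD] = acc / (2 * RAD + 1);   // __STORE(__h - 4, ...)
    for (int i = 0; i < 2 * RAD; ++i)      // rotate the register queue
      w[i] = w[i + 1];
  }
}

Only interior points are written here; the generated code instead handles the edges explicitly via the clamped-register branches visible above.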
06bccae5566a04aacbb5745d9c77d92f11f293e9.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <chrono>
#define cimg_display 0
#include <hip/hip_runtime.h>
#include <CImg.h>

using namespace std;
using namespace cimg_library;

const int block_row = 32;

// 3x3 box filter: one thread per pixel, one grid layer (blockIdx.z) per channel.
__global__ void img_filter_kernel(unsigned char *input, unsigned char *output, size_t r, size_t c)
{
    int x = blockIdx.x * block_row + threadIdx.x,
        y = blockIdx.y * block_row + threadIdx.y,
        z = blockIdx.z;
    if (x >= c || y >= r) return; // guard threads in the rounded-up grid (the original wrote out of bounds here)
    int base = r * c * z;
    int sum = 0, count = 0;
    for (int i = y - 1; i <= y + 1; ++i)
        if (i >= 0 && i < r)
            for (int j = x - 1; j <= x + 1; ++j)
                if (j >= 0 && j < c) {
                    ++count;
                    sum += input[base + c * i + j];
                }
    if (count)
        output[base + c * y + x] = sum / count;
}

typedef CImg<unsigned char> Image;

void img_filter(Image &img)
{
    unsigned char *input_d, *output_d;
    size_t data_size = img.size() * sizeof(unsigned char);
    hipMalloc((void **)&input_d, data_size);
    hipMalloc((void **)&output_d, data_size);
    hipMemcpy(input_d, img.data(), data_size, hipMemcpyHostToDevice);
    size_t r = img.height(), c = img.width();
    dim3 grid_dim((c + block_row - 1) / block_row, (r + block_row - 1) / block_row, img.spectrum()),
         block_dim(block_row, block_row);
    hipLaunchKernelGGL(img_filter_kernel, dim3(grid_dim), dim3(block_dim), 0, 0, input_d, output_d, r, c);
    hipMemcpy(img.data(), output_d, data_size, hipMemcpyDeviceToHost);
    hipFree(input_d);
    hipFree(output_d);
}

int main(int argc, char *argv[])
{
    Image img("lena.jpg");
    chrono::time_point<chrono::system_clock> start, end;
    int ms;
    start = chrono::system_clock::now();
    img_filter(img); // timed interval includes allocation and host<->device copies, not just the kernel
    end = chrono::system_clock::now();
    ms = chrono::duration_cast<chrono::milliseconds>(end - start).count();
    printf("GPU: %d\n", ms);
    img.save("lena_filter.jpg");
    return 0;
}
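Every hipMalloc/hipMemcpy return code in the file above is silently discarded. A common remedy is a checking macro along these lines (an illustrative sketch; HIP_CHECK is a made-up name, defined neither by the file nor by HIP itself):

#include <cstdio>
#include <cstdlib>
#include <hip/hip_runtime.h>

#define HIP_CHECK(call)                                           \
    do {                                                          \
        hipError_t err_ = (call);                                 \
        if (err_ != hipSuccess) {                                 \
            fprintf(stderr, "HIP error '%s' at %s:%d\n",          \
                    hipGetErrorString(err_), __FILE__, __LINE__); \
            exit(EXIT_FAILURE);                                   \
        }                                                         \
    } while (0)

// usage: HIP_CHECK(hipMalloc((void **)&input_d, data_size));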
06bccae5566a04aacbb5745d9c77d92f11f293e9.cu
#include <cstdio>
#include <cstdlib>
#include <chrono>
#define cimg_display 0
#include <cuda.h>
#include <CImg.h>

using namespace std;
using namespace cimg_library;

const int block_row = 32;

// 3x3 box filter: one thread per pixel, one grid layer (blockIdx.z) per channel.
__global__ void img_filter_kernel(unsigned char *input, unsigned char *output, size_t r, size_t c)
{
    int x = blockIdx.x * block_row + threadIdx.x,
        y = blockIdx.y * block_row + threadIdx.y,
        z = blockIdx.z;
    if (x >= c || y >= r) return; // guard threads in the rounded-up grid (the original wrote out of bounds here)
    int base = r * c * z;
    int sum = 0, count = 0;
    for (int i = y - 1; i <= y + 1; ++i)
        if (i >= 0 && i < r)
            for (int j = x - 1; j <= x + 1; ++j)
                if (j >= 0 && j < c) {
                    ++count;
                    sum += input[base + c * i + j];
                }
    if (count)
        output[base + c * y + x] = sum / count;
}

typedef CImg<unsigned char> Image;

void img_filter(Image &img)
{
    unsigned char *input_d, *output_d;
    size_t data_size = img.size() * sizeof(unsigned char);
    cudaMalloc((void **)&input_d, data_size);
    cudaMalloc((void **)&output_d, data_size);
    cudaMemcpy(input_d, img.data(), data_size, cudaMemcpyHostToDevice);
    size_t r = img.height(), c = img.width();
    dim3 grid_dim((c + block_row - 1) / block_row, (r + block_row - 1) / block_row, img.spectrum()),
         block_dim(block_row, block_row);
    img_filter_kernel<<<grid_dim, block_dim>>>(input_d, output_d, r, c);
    cudaMemcpy(img.data(), output_d, data_size, cudaMemcpyDeviceToHost);
    cudaFree(input_d);
    cudaFree(output_d);
}

int main(int argc, char *argv[])
{
    Image img("lena.jpg");
    chrono::time_point<chrono::system_clock> start, end;
    int ms;
    start = chrono::system_clock::now();
    img_filter(img); // timed interval includes allocation and host<->device copies, not just the kernel
    end = chrono::system_clock::now();
    ms = chrono::duration_cast<chrono::milliseconds>(end - start).count();
    printf("GPU: %d\n", ms);
    img.save("lena_filter.jpg");
    return 0;
}
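Since the .hip and .cu files above are meant to be functionally identical, a host-side reference makes a cheap parity check: run both GPU builds and diff their output against the CPU result. A minimal sketch follows (box3_cpu is an illustrative name, not part of either file); it reproduces the kernel's integer arithmetic exactly, including the truncating division.

#include <cstddef>

void box3_cpu(const unsigned char *in, unsigned char *out,
              size_t r, size_t c, size_t channels)
{
    for (size_t z = 0; z < channels; ++z)
        for (size_t y = 0; y < r; ++y)
            for (size_t x = 0; x < c; ++x) {
                int sum = 0, count = 0;
                for (long i = (long)y - 1; i <= (long)y + 1; ++i)
                    if (i >= 0 && i < (long)r)
                        for (long j = (long)x - 1; j <= (long)x + 1; ++j)
                            if (j >= 0 && j < (long)c) {
                                ++count;
                                sum += in[r * c * z + c * (size_t)i + (size_t)j];
                            }
                out[r * c * z + c * y + x] = (unsigned char)(sum / count); // count >= 4, never zero
            }
}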
0b83473890f739c2235afc6a31614ce682962a7a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Equihash CUDA solver // Copyright (c) 2016 John Tromp #define XINTREE #define UNROLL #define htole32(x) (x) #define HAVE_DECL_HTOLE32 1 #include "../cpu_tromp/equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <functional> #include <vector> #include <iostream> #include "eqcuda.hpp" #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) #ifndef SAVEMEM #if RESTBITS == 4 // can't save memory in such small buckets #define SAVEMEM 1 #elif RESTBITS >= 8 // take advantage of law of large numbers (sum of 2^8 random numbers) // this reduces (200,9) memory to under 192MB, with negligible discarding #define SAVEMEM 9/14 #endif #endif // number of buckets static const u32 NBUCKETS = 1 << BUCKBITS; // bucket mask static const u32 BUCKMASK = NBUCKETS - 1; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS + 1 + 1; static const u32 SLOTRANGE = 1 << SLOTBITS; // number of slots per bucket static const u32 NSLOTS = SLOTRANGE * SAVEMEM; // SLOTBITS mask static const u32 SLOTMASK = SLOTRANGE - 1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1 << RESTBITS; // RESTBITS mask static const u32 RESTMASK = NRESTS - 1; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { u32 bid_s0_s1_x; // manual bitfields __device__ tree(const u32 idx, const u32 xh) { bid_s0_s1_x = idx << RESTBITS | xh; } __device__ tree(const u32 idx) { bid_s0_s1_x = idx; } __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) { #ifdef XINTREE bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh; #else bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1; #endif } __device__ u32 getindex() const { #ifdef XINTREE return bid_s0_s1_x >> RESTBITS; #else return bid_s0_s1_x; #endif } __device__ u32 bucketid() const { #ifdef XINTREE return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS); #else return bid_s0_s1_x >> (2 * SLOTBITS); #endif } __device__ u32 slotid0() const { #ifdef XINTREE return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK; #else return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK; #endif } __device__ u32 slotid1() const { #ifdef XINTREE return (bid_s0_s1_x >> RESTBITS) & SLOTMASK; #else return bid_s0_s1_x & SLOTMASK; #endif } __device__ u32 xhash() const { return bid_s0_s1_x & RESTMASK; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; 
#else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } // size (in bytes) of hash in round 0 <= r < WK __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK + 1) / 2]; bucket1 *trees1[WK / 2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) { setheader(&blake_ctx, header, len, nonce, nlen); checkCudaErrors(hipMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } __device__ u32 getnslots0(const u32 bid) { u32 &nslot = nslots[0][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ u32 getnslots1(const u32 bid) { u32 &nslot = nslots[1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i = 0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size + i]; indices[size + i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid()]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0()].attr.getindex(); indices[size] = buck[t.slotid1()].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid()]; const u32 size = 1 << 1; listindices1(buck[t.slotid0()].attr, indices); listindices1(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid()]; const u32 size = 1 << 2; listindices2(buck[t.slotid0()].attr, indices); listindices2(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid()]; const u32 size = 1 << 3; listindices3(buck[t.slotid0()].attr, indices); listindices3(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid()]; const u32 size = 1 << 4; listindices4(buck[t.slotid0()].attr, indices); listindices4(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid()]; const u32 size = 1 << 5; listindices5(buck[t.slotid0()].attr, indices); listindices5(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid()]; const u32 size = 1 << 6; listindices6(buck[t.slotid0()].attr, indices); listindices6(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid()]; const u32 size = 1 << 7; listindices7(buck[t.slotid0()].attr, indices); listindices7(buck[t.slotid1()].attr, indices+size); 
orderindices(indices, size); } __device__ void listindices9(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[4][t.bucketid()]; const u32 size = 1 << 8; listindices8(buck[t.slotid0()].attr, indices); listindices8(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #elif WK==7 listindices7(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #elif WK==7 listindices7(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(hipMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), hipMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6); binsizes[bsize]++; } for (u32 i = 0; i < 65; i++) { #ifdef HIST printf(" %d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; #else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4; #elif WN == 192 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 192 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] & 0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 
xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar xslot; #else typedef u16 xslot; #endif static const xslot xnil = ~0; xslot xhashslots[NRESTS]; xslot nextxhashslot[NSLOTS]; xslot nextslot; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(xhashslots, xnil, NRESTS * sizeof(xslot)); memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else nextslot = xhashslots[xh]; nextxhashslot[s1] = nextslot; xhashslots[xh] = s1; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return nextslot != xnil; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; #else nextslot = nextxhashslot[s0 = nextslot]; #endif return s0; } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); // always 23 ? const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * WN / 8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; slot0 &s = eq->hta.trees0[0][bucketid][slot]; #ifdef XINTREE s.attr = tree(block*HASHESPERBLAKE+i, xhash); #else s.attr = tree(block*HASHESPERBLAKE+i); #endif memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4 | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 192 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; xhash 
&= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2 | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]); xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 192 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i = htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = 
__byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x1234); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = 
cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4123); const u32 xorbucketid = bexor >> 8; const u32 xhash = bexor >> 4 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = 
pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4012); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x3456); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); // assume WK odd for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { #ifdef XINTREE eq->candidate(tree(bucketid, s0, s1, 0)); #else eq->candidate(tree(bucketid, s0, s1)); #endif } } } } } eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id) { eq = new equi(threadsperblock * totalblocks); sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096); solutions = (proof*)(((long long)sol_memory + 4095) & -4096); checkCudaErrors(hipSetDevice(device_id)); checkCudaErrors(hipDeviceReset()); checkCudaErrors(hipSetDeviceFlags(hipDeviceScheduleBlockingSync)); checkCudaErrors(hipDeviceSetCacheConfig(hipFuncCachePreferL1)); checkCudaErrors(hipMalloc((void**)&heap0, sizeof(digit0))); checkCudaErrors(hipMalloc((void**)&heap1, sizeof(digit1))); for (u32 r = 0; r < WK; r++) if ((r & 1) == 0) eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2); else eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2); checkCudaErrors(hipMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(hipMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof))); checkCudaErrors(hipMalloc((void**)&device_eq, sizeof(equi))); } eq_cuda_context::~eq_cuda_context() { /*checkCudaErrors(hipFree(eq->nslots)); checkCudaErrors(hipFree(eq->sols)); checkCudaErrors(hipFree(eq->hta.trees0[0])); checkCudaErrors(hipFree(eq->hta.trees1[0]));*/ checkCudaErrors(hipSetDevice(device_id)); checkCudaErrors(hipDeviceReset()); free(sol_memory); delete eq; } void 
eq_cuda_context::solve(const char *tequihash_header, unsigned int tequihash_header_len, const char* nonce, unsigned int nonce_len, std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef) { checkCudaErrors(hipSetDevice(device_id)); eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len); checkCudaErrors(hipMemcpy(device_eq, eq, sizeof(equi), hipMemcpyHostToDevice)); digitH << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) digit_1 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit2 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit3 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit4 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit5 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit6 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit7 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit8 << <totalblocks, threadsperblock >> >(device_eq); #else for (u32 r = 1; r < WK; r++) { r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r) : digitE << <totalblocks, threadsperblock >> >(device_eq, r); } #endif if (cancelf()) return; digitK << <totalblocks, threadsperblock >> >(device_eq); checkCudaErrors(hipMemcpy(eq, device_eq, sizeof(equi), hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), hipMemcpyDeviceToHost)); for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++) { std::vector<uint32_t> index_vector(PROOFSIZE); for (u32 i = 0; i < PROOFSIZE; i++) { index_vector[i] = solutions[s][i]; } solutionf(index_vector, DIGITBITS, nullptr); if (cancelf()) return; } hashdonef(); }
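The XINTREE packing in struct tree (shared by the .hip file above and the .cu file below) is easy to sanity-check on the host. Assuming the (200,9) configuration with the default RESTBITS=4 (hence SLOTBITS=6 and 16 bucket bits), bucketid, the two slot ids, and the 4-bit xhash tile a u32 exactly. A standalone, illustrative round-trip test, not part of the solver:

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t SLOTBITS = 6, RESTBITS = 4;
    const uint32_t SLOTMASK = (1u << SLOTBITS) - 1, RESTMASK = (1u << RESTBITS) - 1;
    uint32_t bid = 0xABCD, s0 = 37, s1 = 11, xh = 9; // arbitrary in-range values
    // same packing as tree::tree(bid, s0, s1, xh) under XINTREE
    uint32_t packed = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh;
    assert((packed >> (2 * SLOTBITS + RESTBITS)) == bid);         // bucketid()
    assert(((packed >> (SLOTBITS + RESTBITS)) & SLOTMASK) == s0); // slotid0()
    assert(((packed >> RESTBITS) & SLOTMASK) == s1);              // slotid1()
    assert((packed & RESTMASK) == xh);                            // xhash()
    return 0;
}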
0b83473890f739c2235afc6a31614ce682962a7a.cu
// Equihash CUDA solver // Copyright (c) 2016 John Tromp #define XINTREE #define UNROLL #define htole32(x) (x) #define HAVE_DECL_HTOLE32 1 #include "../cpu_tromp/equi.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <functional> #include <vector> #include <iostream> #include "eqcuda.hpp" #include "blake2b.cu" typedef uint16_t u16; typedef uint64_t u64; #ifndef RESTBITS #define RESTBITS 4 #endif // 2_log of number of buckets #define BUCKBITS (DIGITBITS-RESTBITS) #ifndef SAVEMEM #if RESTBITS == 4 // can't save memory in such small buckets #define SAVEMEM 1 #elif RESTBITS >= 8 // take advantage of law of large numbers (sum of 2^8 random numbers) // this reduces (200,9) memory to under 192MB, with negligible discarding #define SAVEMEM 9/14 #endif #endif // number of buckets static const u32 NBUCKETS = 1 << BUCKBITS; // bucket mask static const u32 BUCKMASK = NBUCKETS - 1; // 2_log of number of slots per bucket static const u32 SLOTBITS = RESTBITS + 1 + 1; static const u32 SLOTRANGE = 1 << SLOTBITS; // number of slots per bucket static const u32 NSLOTS = SLOTRANGE * SAVEMEM; // SLOTBITS mask static const u32 SLOTMASK = SLOTRANGE - 1; // number of possible values of xhash (rest of n) bits static const u32 NRESTS = 1 << RESTBITS; // RESTBITS mask static const u32 RESTMASK = NRESTS - 1; // number of blocks of hashes extracted from single 512 bit blake2b output static const u32 NBLOCKS = (NHASHES + HASHESPERBLAKE - 1) / HASHESPERBLAKE; // nothing larger found in 100000 runs static const u32 MAXSOLS = 8; // tree node identifying its children as two different slots in // a bucket on previous layer with the same rest bits (x-tra hash) struct tree { u32 bid_s0_s1_x; // manual bitfields __device__ tree(const u32 idx, const u32 xh) { bid_s0_s1_x = idx << RESTBITS | xh; } __device__ tree(const u32 idx) { bid_s0_s1_x = idx; } __device__ tree(const u32 bid, const u32 s0, const u32 s1, const u32 xh) { #ifdef XINTREE bid_s0_s1_x = ((((bid << SLOTBITS) | s0) << SLOTBITS) | s1) << RESTBITS | xh; #else bid_s0_s1_x = (((bid << SLOTBITS) | s0) << SLOTBITS) | s1; #endif } __device__ u32 getindex() const { #ifdef XINTREE return bid_s0_s1_x >> RESTBITS; #else return bid_s0_s1_x; #endif } __device__ u32 bucketid() const { #ifdef XINTREE return bid_s0_s1_x >> (2 * SLOTBITS + RESTBITS); #else return bid_s0_s1_x >> (2 * SLOTBITS); #endif } __device__ u32 slotid0() const { #ifdef XINTREE return (bid_s0_s1_x >> SLOTBITS + RESTBITS) & SLOTMASK; #else return (bid_s0_s1_x >> SLOTBITS) & SLOTMASK; #endif } __device__ u32 slotid1() const { #ifdef XINTREE return (bid_s0_s1_x >> RESTBITS) & SLOTMASK; #else return bid_s0_s1_x & SLOTMASK; #endif } __device__ u32 xhash() const { return bid_s0_s1_x & RESTMASK; } }; union hashunit { u32 word; uchar bytes[sizeof(u32)]; }; #define WORDS(bits) ((bits + 31) / 32) #define HASHWORDS0 WORDS(WN - DIGITBITS + RESTBITS) #define HASHWORDS1 WORDS(WN - 2*DIGITBITS + RESTBITS) struct slot0 { tree attr; hashunit hash[HASHWORDS0]; }; struct slot1 { tree attr; hashunit hash[HASHWORDS1]; }; // a bucket is NSLOTS treenodes typedef slot0 bucket0[NSLOTS]; typedef slot1 bucket1[NSLOTS]; // the N-bit hash consists of K+1 n-bit "digits" // each of which corresponds to a layer of NBUCKETS buckets typedef bucket0 digit0[NBUCKETS]; typedef bucket1 digit1[NBUCKETS]; // size (in bytes) of hash in round 0 <= r < WK u32 hhashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 
7) / 8; } // size (in bytes) of hash in round 0 <= r < WK __device__ u32 hashsize(const u32 r) { #ifdef XINTREE const u32 hashbits = WN - (r + 1) * DIGITBITS; #else const u32 hashbits = WN - (r + 1) * DIGITBITS + RESTBITS; #endif return (hashbits + 7) / 8; } u32 hhashwords(u32 bytes) { return (bytes + 3) / 4; } __device__ u32 hashwords(u32 bytes) { return (bytes + 3) / 4; } // manages hash and tree data struct htalloc { bucket0 *trees0[(WK + 1) / 2]; bucket1 *trees1[WK / 2]; }; typedef u32 bsizes[NBUCKETS]; struct equi { blake2b_state blake_ctx; htalloc hta; bsizes *nslots; proof *sols; u32 nsols; u32 nthreads; equi(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(const char *header, const u32 len, const char* nonce, const u32 nlen) { setheader(&blake_ctx, header, len, nonce, nlen); checkCudaErrors(cudaMemset(nslots, 0, NBUCKETS * sizeof(u32))); nsols = 0; } __device__ u32 getnslots0(const u32 bid) { u32 &nslot = nslots[0][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ u32 getnslots1(const u32 bid) { u32 &nslot = nslots[1][bid]; const u32 n = min(nslot, NSLOTS); nslot = 0; return n; } __device__ void orderindices(u32 *indices, u32 size) { if (indices[0] > indices[size]) { for (u32 i = 0; i < size; i++) { const u32 tmp = indices[i]; indices[i] = indices[size + i]; indices[size + i] = tmp; } } } __device__ void listindices1(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[0][t.bucketid()]; const u32 size = 1 << 0; indices[0] = buck[t.slotid0()].attr.getindex(); indices[size] = buck[t.slotid1()].attr.getindex(); orderindices(indices, size); } __device__ void listindices2(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[0][t.bucketid()]; const u32 size = 1 << 1; listindices1(buck[t.slotid0()].attr, indices); listindices1(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices3(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[1][t.bucketid()]; const u32 size = 1 << 2; listindices2(buck[t.slotid0()].attr, indices); listindices2(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices4(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[1][t.bucketid()]; const u32 size = 1 << 3; listindices3(buck[t.slotid0()].attr, indices); listindices3(buck[t.slotid1()].attr, indices + size); orderindices(indices, size); } __device__ void listindices5(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[2][t.bucketid()]; const u32 size = 1 << 4; listindices4(buck[t.slotid0()].attr, indices); listindices4(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices6(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[2][t.bucketid()]; const u32 size = 1 << 5; listindices5(buck[t.slotid0()].attr, indices); listindices5(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices7(const tree t, u32 *indices) { const bucket0 &buck = hta.trees0[3][t.bucketid()]; const u32 size = 1 << 6; listindices6(buck[t.slotid0()].attr, indices); listindices6(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices8(const tree t, u32 *indices) { const bucket1 &buck = hta.trees1[3][t.bucketid()]; const u32 size = 1 << 7; listindices7(buck[t.slotid0()].attr, indices); listindices7(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void listindices9(const tree t, u32 *indices) { const 
bucket0 &buck = hta.trees0[4][t.bucketid()]; const u32 size = 1 << 8; listindices8(buck[t.slotid0()].attr, indices); listindices8(buck[t.slotid1()].attr, indices+size); orderindices(indices, size); } __device__ void candidate(const tree t) { proof prf; #if WK==9 listindices9(t, prf); #elif WK==5 listindices5(t, prf); #elif WK==7 listindices7(t, prf); #else #error not implemented #endif if (probdupe(prf)) return; u32 soli = atomicAdd(&nsols, 1); if (soli < MAXSOLS) #if WK==9 listindices9(t, sols[soli]); #elif WK==5 listindices5(t, sols[soli]); #elif WK==7 listindices7(t, sols[soli]); #else #error not implemented #endif } void showbsizes(u32 r) { #if defined(HIST) || defined(SPARK) || defined(LOGSPARK) u32 ns[NBUCKETS]; checkCudaErrors(cudaMemcpy(ns, nslots[r & 1], NBUCKETS * sizeof(u32), cudaMemcpyDeviceToHost)); u32 binsizes[65]; memset(binsizes, 0, 65 * sizeof(u32)); for (u32 bucketid = 0; bucketid < NBUCKETS; bucketid++) { u32 bsize = min(ns[bucketid], NSLOTS) >> (SLOTBITS - 6); binsizes[bsize]++; } for (u32 i = 0; i < 65; i++) { #ifdef HIST printf(" %d:%d", i, binsizes[i]); #else #ifdef SPARK u32 sparks = binsizes[i] / SPARKSCALE; #else u32 sparks = 0; for (u32 bs = binsizes[i]; bs; bs >>= 1) sparks++; sparks = sparks * 7 / SPARKSCALE; #endif printf("\342\226%c", '\201' + sparks); #endif } printf("\n"); #endif } // proper dupe test is a little costly on GPU, so allow false negatives __device__ bool probdupe(u32 *prf) { unsigned short susp[PROOFSIZE]; memset(susp, 0xffff, PROOFSIZE * sizeof(unsigned short)); for (u32 i=0; i<PROOFSIZE; i++) { u32 bin = prf[i] & (PROOFSIZE-1); unsigned short msb = prf[i]>>WK; if (msb == susp[bin]) return true; susp[bin] = msb; } return false; } struct htlayout { htalloc hta; u32 prevhashunits; u32 nexthashunits; u32 dunits; u32 prevbo; u32 nextbo; __device__ htlayout(equi *eq, u32 r) : hta(eq->hta), prevhashunits(0), dunits(0) { u32 nexthashbytes = hashsize(r); nexthashunits = hashwords(nexthashbytes); prevbo = 0; nextbo = nexthashunits * sizeof(hashunit) - nexthashbytes; // 0-3 if (r) { u32 prevhashbytes = hashsize(r-1); prevhashunits = hashwords(prevhashbytes); prevbo = prevhashunits * sizeof(hashunit) - prevhashbytes; // 0-3 dunits = prevhashunits - nexthashunits; } } __device__ u32 getxhash0(const slot0* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] >> 4; #elif WN == 200 && RESTBITS == 8 return (pslot->hash->bytes[prevbo] & 0xf) << 4 | pslot->hash->bytes[prevbo + 1] >> 4; #elif WN == 192 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return (pslot->hash->bytes[prevbo] & 0x3) << 4 | pslot->hash->bytes[prevbo+1] >> 4; #else #error non implemented #endif } __device__ u32 getxhash1(const slot1* pslot) const { #ifdef XINTREE return pslot->attr.xhash(); #elif WN == 200 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 8 return pslot->hash->bytes[prevbo]; #elif WN == 192 && RESTBITS == 4 return pslot->hash->bytes[prevbo] & 0xf; #elif WN == 200 && RESTBITS == 6 return pslot->hash->bytes[prevbo] & 0x3f; #else #error non implemented #endif } __device__ bool equal(const hashunit *hash0, const hashunit *hash1) const { return hash0[prevhashunits - 1].word == hash1[prevhashunits - 1].word; } }; struct collisiondata { #ifdef XBITMAP #if NSLOTS > 64 #error cant use XBITMAP with more than 64 slots #endif u64 xhashmap[NRESTS]; u64 xmap; #else #if RESTBITS <= 6 typedef uchar xslot; #else typedef u16 xslot; 
#endif static const xslot xnil = ~0; xslot xhashslots[NRESTS]; xslot nextxhashslot[NSLOTS]; xslot nextslot; #endif u32 s0; __device__ void clear() { #ifdef XBITMAP memset(xhashmap, 0, NRESTS * sizeof(u64)); #else memset(xhashslots, xnil, NRESTS * sizeof(xslot)); memset(nextxhashslot, xnil, NSLOTS * sizeof(xslot)); #endif } __device__ bool addslot(u32 s1, u32 xh) { #ifdef XBITMAP xmap = xhashmap[xh]; xhashmap[xh] |= (u64)1 << s1; s0 = ~0; return true; #else nextslot = xhashslots[xh]; nextxhashslot[s1] = nextslot; xhashslots[xh] = s1; return true; #endif } __device__ bool nextcollision() const { #ifdef XBITMAP return xmap != 0; #else return nextslot != xnil; #endif } __device__ u32 slot() { #ifdef XBITMAP const u32 ffs = __ffsll(xmap); s0 += ffs; xmap >>= ffs; #else nextslot = nextxhashslot[s0 = nextslot]; #endif return s0; } }; }; __global__ void digitH(equi *eq) { uchar hash[HASHOUT]; blake2b_state state; equi::htlayout htl(eq, 0); const u32 hashbytes = hashsize(0); // always 23 ? const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 block = id; block < NBLOCKS; block += eq->nthreads) { state = eq->blake_ctx; blake2b_gpu_hash(&state, block, hash, HASHOUT); for (u32 i = 0; i<HASHESPERBLAKE; i++) { const uchar *ph = hash + i * WN / 8; #if BUCKBITS == 16 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 8) | ph[1]; #ifdef XINTREE const u32 xhash = ph[2] >> 4; #endif #elif BUCKBITS == 14 && RESTBITS == 6 const u32 bucketid = ((u32)ph[0] << 6) | ph[1] >> 2; #elif BUCKBITS == 12 && RESTBITS == 8 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; #elif BUCKBITS == 20 && RESTBITS == 4 const u32 bucketid = ((((u32)ph[0] << 8) | ph[1]) << 4) | ph[2] >> 4; #ifdef XINTREE const u32 xhash = ph[2] & 0xf; #endif #elif BUCKBITS == 12 && RESTBITS == 4 const u32 bucketid = ((u32)ph[0] << 4) | ph[1] >> 4; const u32 xhash = ph[1] & 0xf; #else #error not implemented #endif const u32 slot = atomicAdd(&eq->nslots[0][bucketid], 1); if (slot >= NSLOTS) continue; slot0 &s = eq->hta.trees0[0][bucketid][slot]; #ifdef XINTREE s.attr = tree(block*HASHESPERBLAKE+i, xhash); #else s.attr = tree(block*HASHESPERBLAKE+i); #endif memcpy(s.hash->bytes+htl.nextbo, ph+WN/8-hashbytes, hashbytes); } } } __global__ void digitO(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) & 0xf) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1])) << 4 | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 192 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (xhash = bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; xhash &= 0xf; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 
1] ^ bytes1[htl.prevbo + 1]) << 4) | (xhash = bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; xhash &= 0xf; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) & 0xf) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 2 | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 6; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[r/2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i=htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } __global__ void digitE(equi *eq, const u32 r) { equi::htlayout htl(eq, r); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[(r - 1) / 2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; u32 xorbucketid; u32 xhash; const uchar *bytes0 = pslot0->hash->bytes, *bytes1 = pslot1->hash->bytes; #if WN == 200 && BUCKBITS == 16 && RESTBITS == 4 && defined(XINTREE) xorbucketid = ((u32)(bytes0[htl.prevbo] ^ bytes1[htl.prevbo]) << 8) | (bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]); xhash = (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 192 && BUCKBITS == 20 && RESTBITS == 4 xorbucketid = ((((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 8) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2])) << 4) | (bytes0[htl.prevbo + 3] ^ bytes1[htl.prevbo + 3]) >> 4; #elif WN == 96 && BUCKBITS == 12 && RESTBITS == 4 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 4) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 4; #elif WN == 200 && BUCKBITS == 14 && RESTBITS == 6 xorbucketid = ((u32)(bytes0[htl.prevbo + 1] ^ bytes1[htl.prevbo + 1]) << 6) | (bytes0[htl.prevbo + 2] ^ bytes1[htl.prevbo + 2]) >> 2; #else #error not implemented #endif const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[r / 2][xorbucketid][xorslot]; #ifdef XINTREE xs.attr = tree(bucketid, s0, s1, xhash); #else xs.attr = tree(bucketid, s0, s1); #endif for (u32 i = htl.dunits; i < htl.prevhashunits; i++) xs.hash[i - htl.dunits].word = pslot0->hash[i].word ^ pslot1->hash[i].word; } } } } #ifdef UNROLL __global__ void digit_1(equi *eq) { equi::htlayout htl(eq, 1); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[0][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 
0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[0][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; xs.hash[4].word = pslot0->hash[5].word ^ pslot1->hash[5].word; } } } } __global__ void digit2(equi *eq) { equi::htlayout htl(eq, 2); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[0][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x0123); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[4].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit3(equi *eq) { equi::htlayout htl(eq, 3); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[1][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x1234); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[1][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; xs.hash[3].word = pslot0->hash[4].word ^ pslot1->hash[4].word; } } } } __global__ void digit4(equi *eq) { equi::htlayout htl(eq, 4); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[1][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const 
u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4123); const u32 xorbucketid = bexor >> 8; const u32 xhash = bexor >> 4 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; xs.hash[2].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[3].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit5(equi *eq) { equi::htlayout htl(eq, 5); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[2][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[2][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; xs.hash[2].word = pslot0->hash[3].word ^ pslot1->hash[3].word; } } } } __global__ void digit6(equi *eq) { equi::htlayout htl(eq, 6); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[2][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x2345); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; xs.hash[1].word = pslot0->hash[2].word ^ pslot1->hash[2].word; } } } } __global__ void digit7(equi *eq) { equi::htlayout htl(eq, 7); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[3][bucketid]; u32 bsize = eq->getnslots0(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 bexor = __byte_perm(xor0, 0, 0x4012); const u32 xorbucketid 
= bexor >> 4 & BUCKMASK; const u32 xhash = bexor & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[1][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot1 &xs = htl.hta.trees1[3][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor0; xs.hash[1].word = pslot0->hash[1].word ^ pslot1->hash[1].word; } } } } __global__ void digit8(equi *eq) { equi::htlayout htl(eq, 8); equi::collisiondata cd; const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot1 *buck = htl.hta.trees1[3][bucketid]; u32 bsize = eq->getnslots1(bucketid); for (u32 s1 = 0; s1 < bsize; s1++) { const slot1 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash1(pslot1))) continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot1 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) continue; const u32 xor0 = pslot0->hash->word ^ pslot1->hash->word; const u32 xor1 = pslot0->hash[1].word ^ pslot1->hash[1].word; const u32 bexor = __byte_perm(xor0, xor1, 0x3456); const u32 xorbucketid = bexor >> 16; const u32 xhash = bexor >> 12 & 0xf; const u32 xorslot = atomicAdd(&eq->nslots[0][xorbucketid], 1); if (xorslot >= NSLOTS) continue; slot0 &xs = htl.hta.trees0[4][xorbucketid][xorslot]; xs.attr = tree(bucketid, s0, s1, xhash); xs.hash[0].word = xor1; } } } } #endif __global__ void digitK(equi *eq) { equi::collisiondata cd; equi::htlayout htl(eq, WK); const u32 id = blockIdx.x * blockDim.x + threadIdx.x; for (u32 bucketid = id; bucketid < NBUCKETS; bucketid += eq->nthreads) { cd.clear(); slot0 *buck = htl.hta.trees0[(WK - 1) / 2][bucketid]; u32 bsize = eq->getnslots0(bucketid); // assume WK odd for (u32 s1 = 0; s1 < bsize; s1++) { const slot0 *pslot1 = buck + s1; if (!cd.addslot(s1, htl.getxhash0(pslot1))) // assume WK odd continue; for (; cd.nextcollision();) { const u32 s0 = cd.slot(); const slot0 *pslot0 = buck + s0; if (htl.equal(pslot0->hash, pslot1->hash)) { #ifdef XINTREE eq->candidate(tree(bucketid, s0, s1, 0)); #else eq->candidate(tree(bucketid, s0, s1)); #endif } } } } } eq_cuda_context::eq_cuda_context(int tpb, int blocks, int id) : threadsperblock(tpb), totalblocks(blocks), device_id(id) { eq = new equi(threadsperblock * totalblocks); sol_memory = malloc(sizeof(proof) * MAXSOLS + 4096); solutions = (proof*)(((long long)sol_memory + 4095) & -4096); checkCudaErrors(cudaSetDevice(device_id)); checkCudaErrors(cudaDeviceReset()); checkCudaErrors(cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync)); checkCudaErrors(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1)); checkCudaErrors(cudaMalloc((void**)&heap0, sizeof(digit0))); checkCudaErrors(cudaMalloc((void**)&heap1, sizeof(digit1))); for (u32 r = 0; r < WK; r++) if ((r & 1) == 0) eq->hta.trees0[r / 2] = (bucket0 *)(heap0 + r / 2); else eq->hta.trees1[r / 2] = (bucket1 *)(heap1 + r / 2); checkCudaErrors(cudaMalloc((void**)&eq->nslots, 2 * NBUCKETS * sizeof(u32))); checkCudaErrors(cudaMalloc((void**)&eq->sols, MAXSOLS * sizeof(proof))); checkCudaErrors(cudaMalloc((void**)&device_eq, sizeof(equi))); } eq_cuda_context::~eq_cuda_context() { /*checkCudaErrors(cudaFree(eq->nslots)); checkCudaErrors(cudaFree(eq->sols)); checkCudaErrors(cudaFree(eq->hta.trees0[0])); checkCudaErrors(cudaFree(eq->hta.trees1[0]));*/ checkCudaErrors(cudaSetDevice(device_id)); checkCudaErrors(cudaDeviceReset()); free(sol_memory); delete eq; } void eq_cuda_context::solve(const char *tequihash_header, unsigned int tequihash_header_len, const char* nonce, unsigned int 
nonce_len, std::function<bool()> cancelf, std::function<void(const std::vector<uint32_t>&, size_t, const unsigned char*)> solutionf, std::function<void(void)> hashdonef) { checkCudaErrors(cudaSetDevice(device_id)); eq->setheadernonce(tequihash_header, tequihash_header_len, nonce, nonce_len); checkCudaErrors(cudaMemcpy(device_eq, eq, sizeof(equi), cudaMemcpyHostToDevice)); digitH << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; #if BUCKBITS == 16 && RESTBITS == 4 && defined XINTREE && defined(UNROLL) digit_1 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit2 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit3 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit4 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit5 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit6 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit7 << <totalblocks, threadsperblock >> >(device_eq); if (cancelf()) return; digit8 << <totalblocks, threadsperblock >> >(device_eq); #else for (u32 r = 1; r < WK; r++) { r & 1 ? digitO << <totalblocks, threadsperblock >> >(device_eq, r) : digitE << <totalblocks, threadsperblock >> >(device_eq, r); } #endif if (cancelf()) return; digitK << <totalblocks, threadsperblock >> >(device_eq); checkCudaErrors(cudaMemcpy(eq, device_eq, sizeof(equi), cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy(solutions, eq->sols, MAXSOLS * sizeof(proof), cudaMemcpyDeviceToHost)); for (unsigned s = 0; (s < eq->nsols) && (s < MAXSOLS); s++) { std::vector<uint32_t> index_vector(PROOFSIZE); for (u32 i = 0; i < PROOFSIZE; i++) { index_vector[i] = solutions[s][i]; } solutionf(index_vector, DIGITBITS, nullptr); if (cancelf()) return; } hashdonef(); }
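Throughout the digit kernels above, __byte_perm does the heavy lifting of splicing the next bucket and rest bits out of the XORed hash words. As a hedged aside that is not taken from this solver: the sketch below only illustrates the selector semantics (each selector nibble picks one byte from the eight-byte pool formed by the two operands, low nibble first), using made-up constants.

// Minimal sketch of __byte_perm selector semantics (illustrative values only).
__global__ void byte_perm_demo(unsigned *out) {
    unsigned lo = 0x33221100;             // bytes 0..3 of the pool
    unsigned hi = 0x77665544;             // bytes 4..7 of the pool
    out[0] = __byte_perm(lo, hi, 0x0123); // byte-reverses lo -> 0x00112233
    out[1] = __byte_perm(lo, hi, 0x5410); // picks bytes 5,4,1,0 -> 0x55441100
}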
a77f4cb578197625bbf2533d58cf3bd0ac5e9e6d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "accessor.h" #include "model.h" #include "cuda_helper.h" template<typename DT, int dim> TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime) { const AccessorRO<DT, dim> acc(region, fid); rect = runtime->get_index_space_domain( ctx, req.region.get_index_space()); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); } template<typename DT> __global__ void zero_array(DT* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 0; } } template<typename DT, int dim> TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime, bool readOutput) { rect = runtime->get_index_space_domain( ctx, req.region.get_index_space()); if (readOutput) { const AccessorRW<DT, dim> acc(region, fid); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); } else { const AccessorWO<DT, dim> acc(region, fid); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); // FIXME: currently we zero init the region if not read output hipLaunchKernelGGL(( assign_kernel<DT>), dim3(GET_BLOCKS(rect.volume())), dim3(CUDA_NUM_THREADS), 0, 0, ptr, rect.volume(), 0.0f); checkCUDA(hipDeviceSynchronize()); } } template class TensorAccessorR<float, 1>; template class TensorAccessorR<float, 2>; template class TensorAccessorR<float, 3>; template class TensorAccessorR<float, 4>; template class TensorAccessorR<int32_t, 1>; template class TensorAccessorR<int32_t, 2>; template class TensorAccessorR<int32_t, 3>; template class TensorAccessorR<int32_t, 4>; template class TensorAccessorR<int64_t, 1>; template class TensorAccessorR<int64_t, 2>; template class TensorAccessorR<int64_t, 3>; template class TensorAccessorR<int64_t, 4>; template class TensorAccessorW<float, 1>; template class TensorAccessorW<float, 2>; template class TensorAccessorW<float, 3>; template class TensorAccessorW<float, 4>; template class TensorAccessorW<int32_t, 1>; template class TensorAccessorW<int32_t, 2>; template class TensorAccessorW<int32_t, 3>; template class TensorAccessorW<int32_t, 4>; template class TensorAccessorW<int64_t, 1>; template class TensorAccessorW<int64_t, 2>; template class TensorAccessorW<int64_t, 3>; template class TensorAccessorW<int64_t, 4>;
a77f4cb578197625bbf2533d58cf3bd0ac5e9e6d.cu
#include "accessor.h" #include "model.h" #include "cuda_helper.h" template<typename DT, int dim> TensorAccessorR<DT, dim>::TensorAccessorR(PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime) { const AccessorRO<DT, dim> acc(region, fid); rect = runtime->get_index_space_domain( ctx, req.region.get_index_space()); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); } template<typename DT> __global__ void zero_array(DT* ptr, coord_t size) { CUDA_KERNEL_LOOP(i, size) { ptr[i] = 0; } } template<typename DT, int dim> TensorAccessorW<DT, dim>::TensorAccessorW(PhysicalRegion region, RegionRequirement req, FieldID fid, Context ctx, Runtime* runtime, bool readOutput) { rect = runtime->get_index_space_domain( ctx, req.region.get_index_space()); if (readOutput) { const AccessorRW<DT, dim> acc(region, fid); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); } else { const AccessorWO<DT, dim> acc(region, fid); assert(acc.accessor.is_dense_arbitrary(rect)); ptr = acc.ptr(rect); // FIXME: currently we zero init the region if not read output assign_kernel<DT><<<GET_BLOCKS(rect.volume()), CUDA_NUM_THREADS>>>( ptr, rect.volume(), 0.0f); checkCUDA(cudaDeviceSynchronize()); } } template class TensorAccessorR<float, 1>; template class TensorAccessorR<float, 2>; template class TensorAccessorR<float, 3>; template class TensorAccessorR<float, 4>; template class TensorAccessorR<int32_t, 1>; template class TensorAccessorR<int32_t, 2>; template class TensorAccessorR<int32_t, 3>; template class TensorAccessorR<int32_t, 4>; template class TensorAccessorR<int64_t, 1>; template class TensorAccessorR<int64_t, 2>; template class TensorAccessorR<int64_t, 3>; template class TensorAccessorR<int64_t, 4>; template class TensorAccessorW<float, 1>; template class TensorAccessorW<float, 2>; template class TensorAccessorW<float, 3>; template class TensorAccessorW<float, 4>; template class TensorAccessorW<int32_t, 1>; template class TensorAccessorW<int32_t, 2>; template class TensorAccessorW<int32_t, 3>; template class TensorAccessorW<int32_t, 4>; template class TensorAccessorW<int64_t, 1>; template class TensorAccessorW<int64_t, 2>; template class TensorAccessorW<int64_t, 3>; template class TensorAccessorW<int64_t, 4>;
26df30d645f7b64df74b5e4a4e442ccc54520733.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "accumulate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *da = NULL; hipMalloc(&da, XSIZE*YSIZE*sizeof(float)); float *ans_device = NULL; hipMalloc(&ans_device, XSIZE*YSIZE*sizeof(float)); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( accumulate), dim3(gridBlock),dim3(threadBlock), 0, 0, da,ans_device,N); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( accumulate), dim3(gridBlock),dim3(threadBlock), 0, 0, da,ans_device,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( accumulate), dim3(gridBlock),dim3(threadBlock), 0, 0, da,ans_device,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
26df30d645f7b64df74b5e4a4e442ccc54520733.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "accumulate.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *da = NULL; cudaMalloc(&da, XSIZE*YSIZE*sizeof(float)); float *ans_device = NULL; cudaMalloc(&ans_device, XSIZE*YSIZE*sizeof(float)); int N = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); accumulate<<<gridBlock,threadBlock>>>(da,ans_device,N); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { accumulate<<<gridBlock,threadBlock>>>(da,ans_device,N); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { accumulate<<<gridBlock,threadBlock>>>(da,ans_device,N); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
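Because steady_clock is read on the host immediately after asynchronous launches, the timed loop above mostly measures launch overhead unless a device synchronization closes the timed region. A hedged sketch of the event-based alternative (the kernel name and launch shape stand in for the ones computed above):

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int rep = 0; rep < 1000; ++rep)
    accumulate<<<gridBlock, threadBlock>>>(da, ans_device, N);
cudaEventRecord(t1);
cudaEventSynchronize(t1);          // block until the last launch finishes
float ms = 0.f;
cudaEventElapsedTime(&ms, t0, t1); // elapsed device time in milliseconds
cudaEventDestroy(t0);
cudaEventDestroy(t1);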
b1c86ca95589de6914d9379a1016eada676b627f.hip
// !!! This is a file automatically generated by hipify!!! #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/generate.h> #include <chrono> using namespace std::chrono; int num_actions = 8; int ncells = 100*100; int nrzns = 5000; int arr_size = ncells * nrzns; int n_print = 30; int my_mod_start = 0; float my_mod(){ int a = (my_mod_start)/nrzns; my_mod_start++; return (float)a; } typedef thrust::device_vector<float>::iterator dIter; int main(){ // TEST: vectorised sort auto START = high_resolution_clock::now(); // fill host array thrust::host_vector<float> H_S2_array(arr_size); for (int i = 0; i < arr_size; i++) H_S2_array[i] = i%(nrzns/100); // to expect 100 reps of each integer after sort std::cout << std::endl; auto init_Hvec = high_resolution_clock::now(); // initialise array of device vecs thrust::device_vector<float> D_arr_of_S2_vecs[num_actions]; for(int i = 0; i< num_actions; i++) D_arr_of_S2_vecs[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); auto copy_H_to_D = high_resolution_clock::now(); // master vector for values:: this section takes 18.0716secs !!! thrust::host_vector<float> master_vals(arr_size*num_actions); // thrust::generate(master_vals.begin(), master_vals.end(), my_mod); for (int i = 0; i < arr_size*num_actions; i++) master_vals[i] = (int)(i/nrzns); // for(int i = 0; i < nrzns; i++) // std::cout << master_vals[i] << ", "; auto generate = high_resolution_clock::now(); // check master_vals thrust::device_vector<float> D_master_vals(arr_size*num_actions); D_master_vals = master_vals; std::cout << "starting jugaad sort" << std::endl; auto start = high_resolution_clock::now(); thrust::device_vector<float> master_S2_vector(arr_size*num_actions); for(int i = 0; i< num_actions; i++) thrust::copy(D_arr_of_S2_vecs[i].begin(), D_arr_of_S2_vecs[i].end(), master_S2_vector.begin() + i*arr_size); // for(int i = 0; i < arr_size*3; i++) // std::cout<< master_S2_vector[i] << ", " ; // std::cout << std::endl; auto mid = high_resolution_clock::now(); thrust::stable_sort_by_key(master_S2_vector.begin(), master_S2_vector.end(), D_master_vals.begin()); thrust::stable_sort_by_key(D_master_vals.begin(), D_master_vals.end(), master_S2_vector.begin()); hipDeviceSynchronize(); // for(int i = 0; i < arr_size*3; i++) // std::cout<< master_S2_vector[i] << ", " ; // std::cout << std::endl; auto end = high_resolution_clock::now(); auto duration1 = duration_cast<microseconds>(end - start); std::cout << "copy + sort time = "<< duration1.count()/1e6 << std::endl; auto duration2 = duration_cast<microseconds>(end - mid); std::cout << "only sort time = "<< duration2.count()/1e6 << std::endl; thrust::device_vector<float> D_ones(nrzns, 1); int num_vecs = arr_size * num_actions / nrzns ; thrust::device_vector<float> D_red_S2[num_vecs]; thrust::device_vector<float> D_red_counts[num_vecs]; for (int i = 0; i < num_vecs; i++){ D_red_S2[i] = thrust::device_vector<float>(nrzns); D_red_counts[i] = thrust::device_vector<float>(nrzns); } thrust::device_vector<float> D_redS2_size(num_vecs); thrust::pair<dIter, dIter> new_end; auto red_start = high_resolution_clock::now(); // This section takes 3 seconds for (int i = 0; i < num_vecs; i++){ new_end = thrust::reduce_by_key(master_S2_vector.begin() + (i*nrzns), master_S2_vector.begin() + ((i+1)*nrzns), D_ones.begin(), D_red_S2[i].begin(), D_red_counts[i].begin()); // D_redS2_size[i] = new_end.first - &D_red_S2[i][0]; // std::cout << D_redS2_size[i] << std::endl; } auto red_end =
high_resolution_clock::now(); auto red_duration = duration_cast<microseconds>(red_end - red_start); std::cout << "reduce_by_key = "<< red_duration.count()/1e6 << std::endl; auto time_spent = duration_cast<microseconds>(init_Hvec - START); std::cout << "initialise H_vec = "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(copy_H_to_D - init_Hvec); std::cout << "copy_H_to_D= "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(generate - copy_H_to_D); std::cout << "generate= "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(red_end - START); std::cout << "Total time= "<< time_spent.count()/1e6 << std::endl; // for (int i = 0; i < 10; i++){ // std::cout << "vec[" << i << "]" << std::endl; // for (int j = 0; j < 110; j++) // std::cout<< D_red_S2[i][j] << " , " << D_red_counts[i][j] << std::endl; // } return 0; } // int main(){ // // TEST: array of vectors do not form contiguous array elements // int num_actions = 8; // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int n_vecs = 5; // int vec_size = 4; // thrust::device_vector<float> arr_of_vec[n_vecs]; // for(int i = 0; i< n_vecs; i++) // arr_of_vec[i] = thrust::device_vector<float>(vec_size); // for(int i = 0; i< n_vecs; i++) // for(int j = 0; j< vec_size; j++) // arr_of_vec[i][j] = vec_size*i + j; // // std::cout << arr_of_vec[vec_size] << std::endl; // for(int i = 0; i< n_vecs; i++) // for(int j = 0; j< vec_size; j++) // std::cout << &arr_of_vec[i][j] << std::endl; // return 0; // } // int main(){ // // ---------------------------------------------------------- // // TEST 3 // // sorting array of vectors in-array vs sorting vector chunks after copying data into chunks for each vector in array of vectors // // RESULTS: // // chunk based sorting is faster // // sorting vector in-array - 28.8 secs // // sorting vector chunks after copying data into chunks - 19.6 secs // // ---------------------------------------------------------- // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int chunk_size = nrzns; // int n_print = 30; // int nchunks = arr_size/chunk_size; // int num_actions = 8; // // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::host_vector<float> H_S2_array(arr_size); //keys vector} // // fill host array // for (int i = 0; i < arr_size; i++) // H_S2_array[i] = i%(nrzns/10); // to expect 10 reps of each integer after sort // std::cout << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< H_S2_array[i] << std::endl; // std::cout << std::endl; // // // --------------------------------------------------------------------- // // // array of S2_vecs // // thrust::device_vector<float> D_arr_of_S2_vecs1[num_actions]; // // for(int i =0; i< num_actions; i++) // // D_arr_of_S2_vecs1[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); // // auto start = high_resolution_clock::now(); // // for (int i = 0; i< num_actions; i++) // // for (int j = 0; j< nchunks; j++) // // thrust::sort(D_arr_of_S2_vecs1[i].begin() + j*chunk_size, D_arr_of_S2_vecs1[i].begin() + (j+1)*chunk_size); // // auto end = high_resolution_clock::now(); // // auto duration = duration_cast<microseconds>(end - start); // // std::cout << "in-array sort time = "<< duration.count()/1e6 << std::endl; // // // RESULT : SORT TIME = 28.8 secs // // // --------------------------------------------------------------------- // // 
--------------------------------------------------------------------- // // array of S2_vecs // thrust::device_vector<float> D_arr_of_S2_vecs2[num_actions]; // for(int i =0; i< num_actions; i++) // D_arr_of_S2_vecs2[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); // auto start = high_resolution_clock::now(); // //make chunk vectors and copy data from main vector into chunks // thrust::device_vector<float> D_arr_of_chunk_vecs[num_actions][nchunks]; // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // D_arr_of_chunk_vecs[i][j] = thrust::device_vector<float> (chunk_size); // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // thrust::copy(D_arr_of_S2_vecs2[i].begin() + j*chunk_size, D_arr_of_S2_vecs2[i].begin() + (j+1)*chunk_size, // D_arr_of_chunk_vecs[i][j].begin()); // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // thrust::sort(D_arr_of_chunk_vecs[i][j].begin(), D_arr_of_chunk_vecs[i][j].end()); // auto end = high_resolution_clock::now(); // auto duration = duration_cast<microseconds>(end - start); // std::cout << "copy-array sort time = "<< duration.count()/1e6 << std::endl; // // RESULT : SORT TIME = 19.6 secs // // --------------------------------------------------------------------- // return 0; // } // int main(){ // // ---------------------------------------------------------- // TEST 2 // // sorting vector in-array vs sorting vector chunks after copying data into chunks // // RESULTS: // // chunk based sorting is faster // // sorting vector in-array - 3.47465 secs // // sorting vector chunks after copying data into chunks - 2.3773 secs // // ---------------------------------------------------------- // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int chunk_size = nrzns; // int n_print = 30; // int nchunks = arr_size/chunk_size; // // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::host_vector<float> H_S2_array(arr_size); //keys vector} // // fill host array // for (int i = 0; i < arr_size; i++) // H_S2_array[i] = i%(nrzns/10); // to expect 10 reps of each integer after sort // std::cout << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< H_S2_array[i] << std::endl; // std::cout << std::endl; // thrust::device_vector<float> D_S2_array_1(arr_size); // thrust::device_vector<float> D_S2_array_2(arr_size); // D_S2_array_1 = H_S2_array; // D_S2_array_2 = H_S2_array; // // Sort 1 device vector in-array // auto start = high_resolution_clock::now(); // for (int i = 0; i< nchunks; i++) // thrust::sort(D_S2_array_1.begin() + i*chunk_size, D_S2_array_1.begin() + (i+1)*chunk_size); // auto end = high_resolution_clock::now(); // auto duration = duration_cast<microseconds>(end - start); // std::cout << "in-array sort time = "<< duration.count()/1e6 << std::endl; // //check sorted results - OK // std::cout << "sorted array "<< std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< D_S2_array_1[i] << std::endl; // std::cout << std::endl ; // start = high_resolution_clock::now(); // //make chunk vectors and copy data from main vector into chunks // thrust::device_vector<float> D_arr_of_chunk_vecs[nchunks]; // for (int i = 0; i < nchunks; i++) // D_arr_of_chunk_vecs[i] = thrust::device_vector<float> (chunk_size); // for (int i = 0; i < nchunks; i++) // thrust::copy(D_S2_array_2.begin() + i*chunk_size, D_S2_array_2.begin() + (i+1)*chunk_size, // D_arr_of_chunk_vecs[i].begin()); // for (int i = 0; i < nchunks; i++) // thrust::sort(D_arr_of_chunk_vecs[i].begin(), D_arr_of_chunk_vecs[i].end()); // end = high_resolution_clock::now(); // duration = duration_cast<microseconds>(end - start); // std::cout << "copy-array sort time = "<< duration.count()/1e6 << std::endl; // //check sorted results - OK // std::cout << "sorted array " << std::endl; // for (int k = 0; k < 3; k++){ // std::cout << "------chunk " << k << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< D_arr_of_chunk_vecs[k][i] << std::endl; // } // std::cout << std::endl; // return 0; // } // int main(){ // // ---------------------------------------------------------- // TEST 1 // // sorting in chunks over a single vector works !! // // SOLUTION: 1 2 2 3 5 1 2 3 4 4 // // ---------------------------------------------------------- // int arr_size = 10; // int chunk_size = 5; // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::device_vector<float> D_S2_array(S2_array, S2_array + arr_size); //keys vector} // int nchunks = arr_size/chunk_size; // for (int i = 0; i< nchunks; i++) // thrust::sort(D_S2_array.begin() + i*chunk_size, D_S2_array.begin() + (i+1)*chunk_size); // for (int i = 0; i < arr_size; i++) // std::cout<< D_S2_array[i] << std::endl; // std::cout << std::endl; // return 0; // }
b1c86ca95589de6914d9379a1016eada676b627f.cu
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/generate.h> #include <chrono> using namespace std::chrono; int num_actions = 8; int ncells = 100*100; int nrzns = 5000; int arr_size = ncells * nrzns; int n_print = 30; int my_mod_start = 0; float my_mod(){ int a = (my_mod_start)/nrzns; my_mod_start++; return (float)a; } typedef thrust::device_vector<float>::iterator dIter; int main(){ // TEST: vectorised sort auto START = high_resolution_clock::now(); // fill host array thrust::host_vector<float> H_S2_array(arr_size); for (int i = 0; i < arr_size; i++) H_S2_array[i] = i%(nrzns/100); // to expect 100 reps of each integer after sort std::cout << std::endl; auto init_Hvec = high_resolution_clock::now(); // initialise array of device vecs thrust::device_vector<float> D_arr_of_S2_vecs[num_actions]; for(int i = 0; i< num_actions; i++) D_arr_of_S2_vecs[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); auto copy_H_to_D = high_resolution_clock::now(); // master vector for values:: this section takes 18.0716secs !!! thrust::host_vector<float> master_vals(arr_size*num_actions); // thrust::generate(master_vals.begin(), master_vals.end(), my_mod); for (int i = 0; i < arr_size*num_actions; i++) master_vals[i] = (int)(i/nrzns); // for(int i = 0; i < nrzns; i++) // std::cout << master_vals[i] << ", "; auto generate = high_resolution_clock::now(); // check master_vals thrust::device_vector<float> D_master_vals(arr_size*num_actions); D_master_vals = master_vals; std::cout << "starting jugaad sort" << std::endl; auto start = high_resolution_clock::now(); thrust::device_vector<float> master_S2_vector(arr_size*num_actions); for(int i = 0; i< num_actions; i++) thrust::copy(D_arr_of_S2_vecs[i].begin(), D_arr_of_S2_vecs[i].end(), master_S2_vector.begin() + i*arr_size); // for(int i = 0; i < arr_size*3; i++) // std::cout<< master_S2_vector[i] << ", " ; // std::cout << std::endl; auto mid = high_resolution_clock::now(); thrust::stable_sort_by_key(master_S2_vector.begin(), master_S2_vector.end(), D_master_vals.begin()); thrust::stable_sort_by_key(D_master_vals.begin(), D_master_vals.end(), master_S2_vector.begin()); cudaDeviceSynchronize(); // for(int i = 0; i < arr_size*3; i++) // std::cout<< master_S2_vector[i] << ", " ; // std::cout << std::endl; auto end = high_resolution_clock::now(); auto duration1 = duration_cast<microseconds>(end - start); std::cout << "copy + sort time = "<< duration1.count()/1e6 << std::endl; auto duration2 = duration_cast<microseconds>(end - mid); std::cout << "only sort time = "<< duration2.count()/1e6 << std::endl; thrust::device_vector<float> D_ones(nrzns, 1); int num_vecs = arr_size * num_actions / nrzns ; thrust::device_vector<float> D_red_S2[num_vecs]; thrust::device_vector<float> D_red_counts[num_vecs]; for (int i = 0; i < num_vecs; i++){ D_red_S2[i] = thrust::device_vector<float>(nrzns); D_red_counts[i] = thrust::device_vector<float>(nrzns); } thrust::device_vector<float> D_redS2_size(num_vecs); thrust::pair<dIter, dIter> new_end; auto red_start = high_resolution_clock::now(); // This section takes 3 seconds for (int i = 0; i < num_vecs; i++){ new_end = thrust::reduce_by_key(master_S2_vector.begin() + (i*nrzns), master_S2_vector.begin() + ((i+1)*nrzns), D_ones.begin(), D_red_S2[i].begin(), D_red_counts[i].begin()); // D_redS2_size[i] = new_end.first - &D_red_S2[i][0]; // std::cout << D_redS2_size[i] << std::endl; } auto red_end = high_resolution_clock::now(); auto red_duration =
duration_cast<microseconds>(red_end - red_start); std::cout << "reduce_by_key = "<< red_duration.count()/1e6 << std::endl; auto time_spent = duration_cast<microseconds>(init_Hvec - START); std::cout << "initialise H_vec = "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(copy_H_to_D - init_Hvec); std::cout << "copy_H_to_D= "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(generate - copy_H_to_D); std::cout << "generate= "<< time_spent.count()/1e6 << std::endl; time_spent = duration_cast<microseconds>(red_end - START); std::cout << "Total time= "<< time_spent.count()/1e6 << std::endl; // for (int i = 0; i < 10; i++){ // std::cout << "vec[" << i << "]" << std::endl; // for (int j = 0; j < 110; j++) // std::cout<< D_red_S2[i][j] << " , " << D_red_counts[i][j] << std::endl; // } return 0; } // int main(){ // // TEST: array of vectors do not form contiguous array elements // int num_actions = 8; // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int n_vecs = 5; // int vec_size = 4; // thrust::device_vector<float> arr_of_vec[n_vecs]; // for(int i = 0; i< n_vecs; i++) // arr_of_vec[i] = thrust::device_vector<float>(vec_size); // for(int i = 0; i< n_vecs; i++) // for(int j = 0; j< vec_size; j++) // arr_of_vec[i][j] = vec_size*i + j; // // std::cout << arr_of_vec[vec_size] << std::endl; // for(int i = 0; i< n_vecs; i++) // for(int j = 0; j< vec_size; j++) // std::cout << &arr_of_vec[i][j] << std::endl; // return 0; // } // int main(){ // // ---------------------------------------------------------- // // TEST 3 // // sorting array of vectors in-array vs sorting vector chunks after copying data into chunks for each vector in array of vectors // // RESULTS: // // chunk based sorting is faster // // sorting vector in-array - 28.8 secs // // sorting vector chunks after copying data into chunks - 19.6 secs // // ---------------------------------------------------------- // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int chunk_size = nrzns; // int n_print = 30; // int nchunks = arr_size/chunk_size; // int num_actions = 8; // // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::host_vector<float> H_S2_array(arr_size); //keys vector} // // fill host array // for (int i = 0; i < arr_size; i++) // H_S2_array[i] = i%(nrzns/10); // to expect 10 reps of each integer after sort // std::cout << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< H_S2_array[i] << std::endl; // std::cout << std::endl; // // // --------------------------------------------------------------------- // // // array of S2_vecs // // thrust::device_vector<float> D_arr_of_S2_vecs1[num_actions]; // // for(int i =0; i< num_actions; i++) // // D_arr_of_S2_vecs1[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); // // auto start = high_resolution_clock::now(); // // for (int i = 0; i< num_actions; i++) // // for (int j = 0; j< nchunks; j++) // // thrust::sort(D_arr_of_S2_vecs1[i].begin() + j*chunk_size, D_arr_of_S2_vecs1[i].begin() + (j+1)*chunk_size); // // auto end = high_resolution_clock::now(); // // auto duration = duration_cast<microseconds>(end - start); // // std::cout << "in-array sort time = "<< duration.count()/1e6 << std::endl; // // // RESULT : SORT TIME = 28.8 secs // // // --------------------------------------------------------------------- // // --------------------------------------------------------------------- // // array of S2_vecs // 
thrust::device_vector<float> D_arr_of_S2_vecs2[num_actions]; // for(int i =0; i< num_actions; i++) // D_arr_of_S2_vecs2[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end()); // auto start = high_resolution_clock::now(); // //make chunk vectors and copy data from main vector into chunks // thrust::device_vector<float> D_arr_of_chunk_vecs[num_actions][nchunks]; // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // D_arr_of_chunk_vecs[i][j] = thrust::device_vector<float> (chunk_size); // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // thrust::copy(D_arr_of_S2_vecs2[i].begin() + j*chunk_size, D_arr_of_S2_vecs2[i].begin() + (j+1)*chunk_size, // D_arr_of_chunk_vecs[i][j].begin()); // for (int i = 0; i < num_actions; i++) // for (int j = 0; j < nchunks; j++) // thrust::sort(D_arr_of_chunk_vecs[i][j].begin(), D_arr_of_chunk_vecs[i][j].end()); // auto end = high_resolution_clock::now(); // auto duration = duration_cast<microseconds>(end - start); // std::cout << "copy-array sort time = "<< duration.count()/1e6 << std::endl; // // RESULT : SORT TIME = 19.6 secs // // --------------------------------------------------------------------- // return 0; // } // int main(){ // // ---------------------------------------------------------- // TEST 2 // // sorting vector in-array vs sorting vector chunks after copying data into chunks // // RESULTS: // // chunk based sorting is faster // // sorting vector in-array - 3.47465 secs // // sorting vector chunks after copying data into chunks - 2.3773 secs // // ---------------------------------------------------------- // int ncells = 100*100; // int nrzns = 5000; // int arr_size = ncells * nrzns; // int chunk_size = nrzns; // int n_print = 30; // int nchunks = arr_size/chunk_size; // // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::host_vector<float> H_S2_array(arr_size); //keys vector} // // fill host array // for (int i = 0; i < arr_size; i++) // H_S2_array[i] = i%(nrzns/10); // to expect 10 reps of each integer after sort // std::cout << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< H_S2_array[i] << std::endl; // std::cout << std::endl; // thrust::device_vector<float> D_S2_array_1(arr_size); // thrust::device_vector<float> D_S2_array_2(arr_size); // D_S2_array_1 = H_S2_array; // D_S2_array_2 = H_S2_array; // // Sort 1 device vector in-array // auto start = high_resolution_clock::now(); // for (int i = 0; i< nchunks; i++) // thrust::sort(D_S2_array_1.begin() + i*chunk_size, D_S2_array_1.begin() + (i+1)*chunk_size); // auto end = high_resolution_clock::now(); // auto duration = duration_cast<microseconds>(end - start); // std::cout << "in-array sort time = "<< duration.count()/1e6 << std::endl; // //check sorted results - OK // std::cout << "sorted array "<< std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< D_S2_array_1[i] << std::endl; // std::cout << std::endl ; // start = high_resolution_clock::now(); // //make chunk vectors and copy data from main vector into chunks // thrust::device_vector<float> D_arr_of_chunk_vecs[nchunks]; // for (int i = 0; i < nchunks; i++) // D_arr_of_chunk_vecs[i] = thrust::device_vector<float> (chunk_size); // for (int i = 0; i < nchunks; i++) // thrust::copy(D_S2_array_2.begin() + i*chunk_size, D_S2_array_2.begin() + (i+1)*chunk_size, // D_arr_of_chunk_vecs[i].begin()); // for (int i = 0; i < nchunks; i++) // thrust::sort(D_arr_of_chunk_vecs[i].begin(), D_arr_of_chunk_vecs[i].end()); // end = high_resolution_clock::now(); // duration = duration_cast<microseconds>(end - start); // std::cout << "copy-array sort time = "<< duration.count()/1e6 << std::endl; // //check sorted results - OK // std::cout << "sorted array " << std::endl; // for (int k = 0; k < 3; k++){ // std::cout << "------chunk " << k << std::endl; // for (int i = 0; i < n_print; i++) // std::cout<< D_arr_of_chunk_vecs[k][i] << std::endl; // } // std::cout << std::endl; // return 0; // } // int main(){ // // ---------------------------------------------------------- // TEST 1 // // sorting in chunks over a single vector works !! // // SOLUTION: 1 2 2 3 5 1 2 3 4 4 // // ---------------------------------------------------------- // int arr_size = 10; // int chunk_size = 5; // float S2_array[arr_size] = {1, 2, 3, 5, 2, 2, 4, 3, 4, 1 }; // thrust::device_vector<float> D_S2_array(S2_array, S2_array + arr_size); //keys vector} // int nchunks = arr_size/chunk_size; // for (int i = 0; i< nchunks; i++) // thrust::sort(D_S2_array.begin() + i*chunk_size, D_S2_array.begin() + (i+1)*chunk_size); // for (int i = 0; i < arr_size; i++) // std::cout<< D_S2_array[i] << std::endl; // std::cout << std::endl; // return 0; // }
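The "jugaad sort" in the live code above is the standard segmented-sort-by-double-stable-sort trick: first stable-sort the values globally while dragging their segment ids along as the payload, then stable-sort by segment id, which regroups the segments without disturbing the now-sorted order inside each one. A minimal, self-contained sketch of the same idea on toy sizes (not the benchmark's):

#include <thrust/device_vector.h>
#include <thrust/sort.h>

// Segmented sort of two length-4 segments via back-to-back stable sorts.
int main() {
    float keys_h[] = {3, 1, 4, 2,  9, 7, 8, 6};
    int   segs_h[] = {0, 0, 0, 0,  1, 1, 1, 1};
    thrust::device_vector<float> keys(keys_h, keys_h + 8);
    thrust::device_vector<int>   segs(segs_h, segs_h + 8);
    // Pass 1: sort keys globally, carrying segment ids along.
    thrust::stable_sort_by_key(keys.begin(), keys.end(), segs.begin());
    // Pass 2: stable sort by segment id; within-segment key order survives.
    thrust::stable_sort_by_key(segs.begin(), segs.end(), keys.begin());
    // keys is now {1,2,3,4, 6,7,8,9}, sorted segment by segment.
    return 0;
}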
b3716dadcb39c80e9df8263d87b6f2906a2a8abd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <math.h> #include <iostream> using namespace std; #define BLOCKSIZE 524 #define WORDLENGTH 500 #define K WORDLENGTH #define threadNum BLOCKSIZE __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void CountPosition_slow(const char* text, int* pos) { int textIdx = threadIdx.x + blockIdx.x*blockDim.x; int result = pos[textIdx]; while(textIdx >= 0 && text[textIdx] != '\n') { result++; textIdx--; } pos[ threadIdx.x + blockIdx.x*blockDim.x] = result; } __global__ void BuildTree_CountPosition(const char* text, int* pos) { //__global__ void BuildTree_CountPosition(const char* text, int* pos, int* debug_tree) { //build trees int N = blockDim.x + WORDLENGTH-1; //N has to be assigned blockDim.x not BLOCKSIZE!!!!!! __shared__ int tree[(WORDLENGTH+BLOCKSIZE-1)*2-1]; int textIdx = threadIdx.x + blockIdx.x*blockDim.x; int treeIdx = threadIdx.x+(N-1)+(WORDLENGTH-1); //initialize the bottom of the tree tree[treeIdx] = (text[textIdx] == '\n')? 0: 1; //if(blockIdx.x == 1) debug_tree[treeIdx] = tree[treeIdx]; if(threadIdx.x < (WORDLENGTH-1)) { if(blockIdx.x != 0) { tree[treeIdx-(WORDLENGTH-1)] = (text[textIdx-(WORDLENGTH-1)] == '\n')? 0: 1; //if(blockIdx.x == 1) debug_tree[treeIdx-(WORDLENGTH-1)] = tree[treeIdx-(WORDLENGTH-1)]; } else tree[treeIdx-(WORDLENGTH-1)] = 0; } __syncthreads(); //debug //if(threadIdx.x < (WORDLENGTH-1) && threadIdx.x != 0) pos[textIdx-(WORDLENGTH-1)] = tree[treeIdx-(WORDLENGTH-1)]; //pos[textIdx] = tree[treeIdx]; //build the upper tree int parent, leftChild, rightChild; for(int p = N/2-1; p > 0; p = (p-1)/2 ) { //not including root //int p = N/2-1; parent = threadIdx.x+p; leftChild = parent*2+1; rightChild = parent*2+2; if(threadIdx.x <= p) { tree[parent] = ( (tree[leftChild] & tree[rightChild]) == 0)? 
0: tree[leftChild]+tree[rightChild]; //pos[ threadIdx.x + blockIdx.x*blockDim.x] = tree[parent]; //if(blockIdx.x == 1) debug_tree[parent] = tree[parent]; } __syncthreads(); } //root if((tree[1] & tree[2]) == 0) tree[0] = 0; else tree[0] = tree[1] + tree[2]; //if(blockIdx.x == 1) debug_tree[0] = tree[0]; // printf("Tree[ %d ] = %d \n", treeIdx, tree[treeIdx]); //count position bool lastStep = 1;// 1 from right, 0 from child textIdx = threadIdx.x + blockIdx.x*blockDim.x; treeIdx = threadIdx.x+(N-1)+(WORDLENGTH-1); int idx = treeIdx; //count up while(idx >= 0 && tree[idx] != 0) { if(lastStep) pos[textIdx] += tree[idx];//from right else pos[textIdx] += (tree[idx]/2);//from child if( ( (idx+1) & -(idx+1) ) == (idx+1) ) break;//if the node is at left most side //find next if(idx%2) { --idx; //from right lastStep = 1; } else { idx = idx/2-1; //from child lastStep = 0; } } //count down if( ( ( (idx+1) & -(idx+1) ) == (idx+1) && tree[idx] != 0) || idx >= N-1) return;//if the node is at left most side and is not zero, or it is the zero bottom node if(lastStep) idx = idx*2+2;//from right, next is right child else idx = idx*2+1;//from child while( idx > 0 && idx < N-1 && tree[idx] == 0) { idx = idx*2+2; } pos[textIdx]+=tree[idx]; } void CountPosition(const char *text, int *pos, int text_size) //void CountPosition(const char *text, int *pos, int* debug_tree, int text_size) { //BuildTree_CountPosition<<<(text_size+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(text, pos); //BuildTree_CountPosition<<<(text_size+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(text, pos, debug_tree); hipLaunchKernelGGL(( CountPosition_slow), dim3((text_size+1023)/1024), dim3(1024), 0, 0, text, pos); } struct isOne { __host__ __device__ bool operator()(const int x) { return (x == 1); } }; int ExtractHead(const int *pos, int *head, int text_size) { int *buffer; int nhead; hipMalloc(&buffer, sizeof(int)*text_size*2); // this is enough thrust::device_ptr<const int> pos_d(pos); thrust::device_ptr<int> head_d(head), flag_d(buffer), cumsum_d(buffer+text_size); // TODO thrust::sequence(flag_d, cumsum_d); nhead = thrust::copy_if(flag_d, cumsum_d, pos_d, head_d, isOne()) - head_d; //do not touch this hipFree(buffer); return nhead; } __global__ void Part3_transform(char* text, char* temp_text, int* pos) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(pos[idx] != 0) { if(pos[idx]%4 == 1 || pos[idx]%4 == 2) { if(pos[idx+2] != 0) temp_text[idx] = text[idx+2]; else temp_text[idx] = '-'; } else { temp_text[idx] = text[idx-2]; } } } __global__ void Part3_assign(char* text, char* temp_text) { int idx = threadIdx.x + blockIdx.x*blockDim.x; text[idx] = temp_text[idx]; //if(idx >= 300 && idx < 400) printf("text_gpu[%d] = %c\n", idx, text[idx]); } void Part3(char *text, int *pos, int *head, int text_size, int n_head) { //Characters of the 1st & the 3rd position swap; the 2nd & the 4th swap; 5th & 7th swap, etc. //For the last characters (at most 2 characters) that don't have a partner to swap, fill them with '-'. char *temp_text; hipMalloc(&temp_text, sizeof(char)*text_size); hipLaunchKernelGGL(( Part3_transform), dim3((text_size+1023)/1024), dim3(1024), 0, 0, text, temp_text, pos); hipLaunchKernelGGL(( Part3_assign), dim3((text_size+1023)/1024), dim3(1024), 0, 0, text, temp_text); hipFree(temp_text); }
b3716dadcb39c80e9df8263d87b6f2906a2a8abd.cu
#include "counting.h" #include <cstdio> #include <cassert> #include <thrust/scan.h> #include <thrust/transform.h> #include <thrust/functional.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <thrust/copy.h> #include <thrust/sequence.h> #include <math.h> #include <iostream> using namespace std; #define BLOCKSIZE 524 #define WORDLENGTH 500 #define K WORDLENGTH #define threadNum BLOCKSIZE __device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; } __device__ __host__ int CeilAlign(int a, int b) { return CeilDiv(a, b) * b; } __global__ void CountPosition_slow(const char* text, int* pos) { int textIdx = threadIdx.x + blockIdx.x*blockDim.x; int result = pos[textIdx]; while(textIdx >= 0 && text[textIdx] != '\n') { result++; textIdx--; } pos[ threadIdx.x + blockIdx.x*blockDim.x] = result; } __global__ void BuildTree_CountPosition(const char* text, int* pos) { //__global__ void BuildTree_CountPosition(const char* text, int* pos, int* debug_tree) { //build trees int N = blockDim.x + WORDLENGTH-1; //N has to be assigned blockDim.x not BLOCKSIZE!!!!!! __shared__ int tree[(WORDLENGTH+BLOCKSIZE-1)*2-1]; int textIdx = threadIdx.x + blockIdx.x*blockDim.x; int treeIdx = threadIdx.x+(N-1)+(WORDLENGTH-1); //initialize the bottom of the tree tree[treeIdx] = (text[textIdx] == '\n')? 0: 1; //if(blockIdx.x == 1) debug_tree[treeIdx] = tree[treeIdx]; if(threadIdx.x < (WORDLENGTH-1)) { if(blockIdx.x != 0) { tree[treeIdx-(WORDLENGTH-1)] = (text[textIdx-(WORDLENGTH-1)] == '\n')? 0: 1; //if(blockIdx.x == 1) debug_tree[treeIdx-(WORDLENGTH-1)] = tree[treeIdx-(WORDLENGTH-1)]; } else tree[treeIdx-(WORDLENGTH-1)] = 0; } __syncthreads(); //debug //if(threadIdx.x < (WORDLENGTH-1) && threadIdx.x != 0) pos[textIdx-(WORDLENGTH-1)] = tree[treeIdx-(WORDLENGTH-1)]; //pos[textIdx] = tree[treeIdx]; //build the upper tree int parent, leftChild, rightChild; for(int p = N/2-1; p > 0; p = (p-1)/2 ) { //not including root //int p = N/2-1; parent = threadIdx.x+p; leftChild = parent*2+1; rightChild = parent*2+2; if(threadIdx.x <= p) { tree[parent] = ( (tree[leftChild] & tree[rightChild]) == 0)? 
0: tree[leftChild]+tree[rightChild]; //pos[ threadIdx.x + blockIdx.x*blockDim.x] = tree[parent]; //if(blockIdx.x == 1) debug_tree[parent] = tree[parent]; } __syncthreads(); } //root if((tree[1] & tree[2]) == 0) tree[0] = 0; else tree[0] = tree[1] + tree[2]; //if(blockIdx.x == 1) debug_tree[0] = tree[0]; // printf("Tree[ %d ] = %d \n", treeIdx, tree[treeIdx]); //count position bool lastStep = 1;// 1 from right, 0 from child textIdx = threadIdx.x + blockIdx.x*blockDim.x; treeIdx = threadIdx.x+(N-1)+(WORDLENGTH-1); int idx = treeIdx; //count up while(idx >= 0 && tree[idx] != 0) { if(lastStep) pos[textIdx] += tree[idx];//from right else pos[textIdx] += (tree[idx]/2);//from child if( ( (idx+1) & -(idx+1) ) == (idx+1) ) break;//if the node is at left most side //find next if(idx%2) { --idx; //from right lastStep = 1; } else { idx = idx/2-1; //from child lastStep = 0; } } //count down if( ( ( (idx+1) & -(idx+1) ) == (idx+1) && tree[idx] != 0) || idx >= N-1) return;//if the node is at left most side and is not zero, or it is the zero bottom node if(lastStep) idx = idx*2+2;//from right, next is right child else idx = idx*2+1;//from child while( idx > 0 && idx < N-1 && tree[idx] == 0) { idx = idx*2+2; } pos[textIdx]+=tree[idx]; } void CountPosition(const char *text, int *pos, int text_size) //void CountPosition(const char *text, int *pos, int* debug_tree, int text_size) { //BuildTree_CountPosition<<<(text_size+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(text, pos); //BuildTree_CountPosition<<<(text_size+BLOCKSIZE-1)/BLOCKSIZE, BLOCKSIZE>>>(text, pos, debug_tree); CountPosition_slow<<<(text_size+1023)/1024, 1024>>>(text, pos); } struct isOne { __host__ __device__ bool operator()(const int x) { return (x == 1); } }; int ExtractHead(const int *pos, int *head, int text_size) { int *buffer; int nhead; cudaMalloc(&buffer, sizeof(int)*text_size*2); // this is enough thrust::device_ptr<const int> pos_d(pos); thrust::device_ptr<int> head_d(head), flag_d(buffer), cumsum_d(buffer+text_size); // TODO thrust::sequence(flag_d, cumsum_d); nhead = thrust::copy_if(flag_d, cumsum_d, pos_d, head_d, isOne()) - head_d; //do not touch this cudaFree(buffer); return nhead; } __global__ void Part3_transform(char* text, char* temp_text, int* pos) { int idx = threadIdx.x + blockIdx.x*blockDim.x; if(pos[idx] != 0) { if(pos[idx]%4 == 1 || pos[idx]%4 == 2) { if(pos[idx+2] != 0) temp_text[idx] = text[idx+2]; else temp_text[idx] = '-'; } else { temp_text[idx] = text[idx-2]; } } } __global__ void Part3_assign(char* text, char* temp_text) { int idx = threadIdx.x + blockIdx.x*blockDim.x; text[idx] = temp_text[idx]; //if(idx >= 300 && idx < 400) printf("text_gpu[%d] = %c\n", idx, text[idx]); } void Part3(char *text, int *pos, int *head, int text_size, int n_head) { //Characters of the 1st & the 3rd position swap; the 2nd & the 4th swap; 5th & 7th swap, etc. //For the last characters (at most 2 characters) that don't have a partner to swap, fill them with '-'. char *temp_text; cudaMalloc(&temp_text, sizeof(char)*text_size); Part3_transform<<<(text_size+1023)/1024, 1024>>>(text, temp_text, pos); Part3_assign<<<(text_size+1023)/1024, 1024>>>(text, temp_text); cudaFree(temp_text); }
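ExtractHead above compacts word-start offsets with the stencil overload of thrust::copy_if over a thrust::sequence: indices whose pos value is 1 survive. A hedged, self-contained sketch of the same compaction pattern on toy data:

#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>

struct is_one {
    __host__ __device__ bool operator()(int x) const { return x == 1; }
};

int main() {
    int pos_h[] = {1, 2, 3, 1, 2, 1};            // toy per-character positions
    thrust::device_vector<int> pos(pos_h, pos_h + 6);
    thrust::device_vector<int> idx(6), head(6);
    thrust::sequence(idx.begin(), idx.end());    // 0,1,2,3,4,5
    // keep idx[i] wherever the stencil value pos[i] satisfies the predicate
    int nhead = static_cast<int>(
        thrust::copy_if(idx.begin(), idx.end(), pos.begin(),
                        head.begin(), is_one()) - head.begin());
    // nhead == 3 and head holds {0, 3, 5}: the word-start offsets
    return nhead == 3 ? 0 : 1;
}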
4b92ecab877a2efa66db7a10cf993fd112b5b4cf.hip
// !!! This is a file automatically generated by hipify!!! // HW#7 22181250 윤정언 #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> extern "C" void gpu_Gabor(float *pcuSrc, float *pcuDst, int w, int h, float *cuGkernel, int kernel_size); __global__ void cuda_Filter2D(float * pSrcImage, int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight, float *pDstImage) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int index = y*SrcWidth + x; int border; float temp; // if (x > KWidth / 2 && y > (KHeight / 2) && x < (SrcWidth - KWidth / 2) && y < (SrcHeight - KHeight / 2)) { temp = 0; for (int i = 0; i < KHeight; i++) { for (int j = 0; j < KWidth; j++) { border = (y + i)*SrcWidth + (x + j); temp += (pSrcImage[border] * pKernel[i*KWidth + j]); } } pDstImage[index] = temp; // __syncthreads(); } else { pDstImage[index] = 0; } } __global__ void cuda_Shared_Filter2D(float * pSrcImage, int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight, float *pDstImage) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int index = y*SrcWidth + x; int border; float temp; int tx = threadIdx.x; int ty = threadIdx.y; extern __shared__ float gmat[]; if (x > KWidth / 2 && y >(KHeight / 2) && x < (SrcWidth - KWidth / 2) && y < (SrcHeight - KHeight / 2)) { temp = 0; if (tx < KWidth&&ty < KHeight) { // stage pKernel into the shared-memory array gmat so shared memory can be used gmat[ty*KWidth + tx] = pKernel[ty*KWidth + tx]; } for (int i = 0; i < KHeight; i++) { for (int j = 0; j < KWidth; j++) { border = (y + i)*SrcWidth + (x + j); temp += (pSrcImage[border] * gmat[i*KWidth + j]); } } pDstImage[index] = temp; // __syncthreads(); } else { pDstImage[index] = 0; } } void gpu_Gabor(float *pcuSrc, float *pcuDst, int w, int h, float *cuGkernel, int kernel_size) { // prepare the launch block and derive the grid from the block size dim3 block = dim3(16, 16, 1); // 16 16 1 threads per block dim3 grid = dim3(h / block.x, w / block.y); // 32 *16 thread blocks cuda_Filter2D << < grid, block >> >(pcuSrc, w, h, cuGkernel, kernel_size, kernel_size, pcuDst); // cuda_Shared_Filter2D << < grid, block,sizeof(float)*kernel_size*kernel_size >> >(pcuSrc, w, h, cuGkernel, kernel_size, kernel_size, pcuDst); hipDeviceSynchronize(); float *PrintKernel = new float[kernel_size*kernel_size]; hipMemcpy(PrintKernel, cuGkernel, kernel_size*kernel_size * sizeof(float), hipMemcpyDeviceToHost); for (int i = 0; i<kernel_size; i++) { for (int j = 0; j<kernel_size; j++) { printf("%f\t", PrintKernel[i*kernel_size + j]); } printf("\n"); } }
4b92ecab877a2efa66db7a10cf993fd112b5b4cf.cu
// HW#7 22181250 윤정언 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> extern "C" void gpu_Gabor(float *pcuSrc, float *pcuDst, int w, int h, float *cuGkernel, int kernel_size); __global__ void cuda_Filter2D(float * pSrcImage, int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight, float *pDstImage) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int index = y*SrcWidth + x; int border; float temp; // if (x > KWidth / 2 && y > (KHeight / 2) && x < (SrcWidth - KWidth / 2) && y < (SrcHeight - KHeight / 2)) { temp = 0; for (int i = 0; i < KHeight; i++) { for (int j = 0; j < KWidth; j++) { border = (y + i)*SrcWidth + (x + j); temp += (pSrcImage[border] * pKernel[i*KWidth + j]); } } pDstImage[index] = temp; // __syncthreads(); } else { pDstImage[index] = 0; } } __global__ void cuda_Shared_Filter2D(float * pSrcImage, int SrcWidth, int SrcHeight, float *pKernel, int KWidth, int KHeight, float *pDstImage) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; int index = y*SrcWidth + x; int border; float temp; int tx = threadIdx.x; int ty = threadIdx.y; extern __shared__ float gmat[]; if (x > KWidth / 2 && y >(KHeight / 2) && x < (SrcWidth - KWidth / 2) && y < (SrcHeight - KHeight / 2)) { temp = 0; if (tx < KWidth&&ty < KHeight) { // stage pKernel into the shared-memory array gmat so shared memory can be used gmat[ty*KWidth + tx] = pKernel[ty*KWidth + tx]; } for (int i = 0; i < KHeight; i++) { for (int j = 0; j < KWidth; j++) { border = (y + i)*SrcWidth + (x + j); temp += (pSrcImage[border] * gmat[i*KWidth + j]); } } pDstImage[index] = temp; // __syncthreads(); } else { pDstImage[index] = 0; } } void gpu_Gabor(float *pcuSrc, float *pcuDst, int w, int h, float *cuGkernel, int kernel_size) { // prepare the launch block and derive the grid from the block size dim3 block = dim3(16, 16, 1); // 16 16 1 threads per block dim3 grid = dim3(h / block.x, w / block.y); // 32 *16 thread blocks cuda_Filter2D << < grid, block >> >(pcuSrc, w, h, cuGkernel, kernel_size, kernel_size, pcuDst); // cuda_Shared_Filter2D << < grid, block,sizeof(float)*kernel_size*kernel_size >> >(pcuSrc, w, h, cuGkernel, kernel_size, kernel_size, pcuDst); cudaThreadSynchronize(); float *PrintKernel = new float[kernel_size*kernel_size]; cudaMemcpy(PrintKernel, cuGkernel, kernel_size*kernel_size * sizeof(float), cudaMemcpyDeviceToHost); for (int i = 0; i<kernel_size; i++) { for (int j = 0; j<kernel_size; j++) { printf("%f\t", PrintKernel[i*kernel_size + j]); } printf("\n"); } }
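cuda_Shared_Filter2D in both halves above stages the coefficients into shared memory, but its __syncthreads() stays commented out and only threads inside the border test participate in the load, so a thread can read gmat entries another thread has not written yet. A hedged sketch of the usual safe staging pattern (names and signature are illustrative, not a drop-in replacement; launch with kw*kh*sizeof(float) dynamic shared memory):

__global__ void filter2D_shared(const float *src, int w, int h,
                                const float *kern, int kw, int kh,
                                float *dst) {
    extern __shared__ float skern[];
    // Cooperative load: every thread strides over the coefficient array.
    for (int i = threadIdx.y * blockDim.x + threadIdx.x; i < kw * kh;
         i += blockDim.x * blockDim.y)
        skern[i] = kern[i];
    __syncthreads();  // barrier before any thread reads skern
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // The border test now guards only the output, mirroring the original's
    // (uncentered) indexing scheme.
    if (x >= kw / 2 && y >= kh / 2 && x < w - kw / 2 && y < h - kh / 2) {
        float acc = 0.f;
        for (int i = 0; i < kh; ++i)
            for (int j = 0; j < kw; ++j)
                acc += src[(y + i) * w + (x + j)] * skern[i * kw + j];
        dst[y * w + x] = acc;
    }
}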
525e8c5659266ed7dd3a23bc27e31a890ff7e6a5.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO Avro reader class implementation **/ #include "reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> namespace cudf { namespace experimental { namespace io { namespace detail { namespace avro { // Import functionality that's independent of legacy code using namespace cudf::io::avro; using namespace cudf::io; namespace { /** * @brief Function that translates Avro data kind to cuDF type enum **/ type_id to_type_id(const avro::schema_entry *col) { switch (col->kind) { case avro::type_boolean: return type_id::BOOL8; case avro::type_int: return type_id::INT32; case avro::type_long: return type_id::INT64; case avro::type_float: return type_id::FLOAT32; case avro::type_double: return type_id::FLOAT64; case avro::type_bytes: case avro::type_string: return type_id::STRING; case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32; default: return type_id::EMPTY; } } } // namespace /** * @brief A helper wrapper for Avro file metadata. 
Provides some additional * convenience methods for initializing and accessing the metadata and schema **/ class metadata : public file_metadata { public: explicit metadata(datasource *const src) : source(src) {} /** * @brief Initializes the parser and filters down to a subset of rows * * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected **/ void init_and_select_rows(int &row_start, int &row_count) { const auto buffer = source->get_buffer(0, source->size()); avro::container pod(buffer->data(), buffer->size()); CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata"); row_start = skip_rows; row_count = num_rows; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * * @return List of column names **/ auto select_columns(std::vector<std::string> use_names) { std::vector<std::pair<int, std::string>> selection; const auto num_avro_columns = static_cast<int>(columns.size()); if (!use_names.empty()) { int index = 0; for (const auto &use_name : use_names) { for (int i = 0; i < num_avro_columns; ++i, ++index) { if (index >= num_avro_columns) { index = 0; } if (columns[index].name == use_name && type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) { selection.emplace_back(index, columns[index].name); index++; break; } } } } else { for (int i = 0; i < num_avro_columns; ++i) { auto col_type = to_type_id(&schema[columns[i].schema_data_idx]); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type"); selection.emplace_back(i, columns[i].name); } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); return selection; } private: datasource *const source; }; rmm::device_buffer reader::impl::decompress_data( const rmm::device_buffer &comp_block_data, hipStream_t stream) { size_t uncompressed_data_size = 0; hostdevice_vector<gpu_inflate_input_s> inflate_in( _metadata->block_list.size()); hostdevice_vector<gpu_inflate_status_s> inflate_out( _metadata->block_list.size()); if (_metadata->codec == "deflate") { // Guess an initial maximum uncompressed block size uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff; uncompressed_data_size = initial_blk_len * _metadata->block_list.size(); for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; } } else if (_metadata->codec == "snappy") { // Extract the uncompressed length from the snappy stream for (size_t i = 0; i < _metadata->block_list.size(); i++) { const auto buffer = _source->get_buffer(_metadata->block_list[i].offset, 4); const uint8_t *blk = buffer->data(); uint32_t blk_len = blk[0]; if (blk_len > 0x7f) { blk_len = (blk_len & 0x7f) | (blk[1] << 7); if (blk_len > 0x3fff) { blk_len = (blk_len & 0x3fff) | (blk[2] << 14); if (blk_len > 0x1fffff) { blk_len = (blk_len & 0x1fffff) | (blk[3] << 21); } } } inflate_in[i].dstSize = blk_len; uncompressed_data_size += blk_len; } } else { CUDF_FAIL("Unsupported compression codec\n"); } rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); const auto base_offset = _metadata->block_list[0].offset; for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) { const auto src_pos = _metadata->block_list[i].offset - base_offset; inflate_in[i].srcDevice = static_cast<const uint8_t *>(comp_block_data.data()) + src_pos; inflate_in[i].srcSize = _metadata->block_list[i].size; inflate_in[i].dstDevice = static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos; // 
Update block offsets & sizes to refer to uncompressed data _metadata->block_list[i].offset = dst_pos; _metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += _metadata->block_list[i].size; } for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) { CUDA_TRY(hipMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(), inflate_in.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(hipMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream)); if (_metadata->codec == "deflate") { CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream)); } else if (_metadata->codec == "snappy") { CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), stream)); } else { CUDF_FAIL("Unsupported compression codec\n"); } CUDA_TRY(hipMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(), inflate_out.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); // Check if larger output is required, as it's not known ahead of time if (_metadata->codec == "deflate" && !loop_cnt) { size_t actual_uncompressed_size = 0; for (size_t i = 0; i < _metadata->block_list.size(); i++) { // If error status is 1 (buffer too small), the `bytes_written` field // actually contains the uncompressed data size if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) { inflate_in[i].dstSize = inflate_out[i].bytes_written; } actual_uncompressed_size += inflate_in[i].dstSize; } if (actual_uncompressed_size > uncompressed_data_size) { decomp_block_data.resize(actual_uncompressed_size); for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) { auto dst_base = static_cast<uint8_t *>(decomp_block_data.data()); inflate_in[i].dstDevice = dst_base + dst_pos; _metadata->block_list[i].offset = dst_pos; _metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += _metadata->block_list[i].size; } } else { break; } } else { break; } } return decomp_block_data; } void reader::impl::decode_data( const rmm::device_buffer &block_data, const std::vector<std::pair<uint32_t, uint32_t>> &dict, const hostdevice_vector<uint8_t> &global_dictionary, size_t total_dictionary_entries, size_t num_rows, std::vector<std::pair<int, std::string>> selection, std::vector<column_buffer> &out_buffers, hipStream_t stream) { // Build gpu schema hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size()); uint32_t min_row_data_size = 0; int skip_field_cnt = 0; for (size_t i = 0; i < _metadata->schema.size(); i++) { type_kind_e kind = _metadata->schema[i].kind; if (skip_field_cnt != 0) { // Exclude union members from min_row_data_size skip_field_cnt += _metadata->schema[i].num_children - 1; } else { switch (kind) { case type_union: skip_field_cnt = _metadata->schema[i].num_children; // fall through case type_boolean: case type_int: case type_long: case type_bytes: case type_string: case type_enum: min_row_data_size += 1; break; case type_float: min_row_data_size += 4; break; case type_double: min_row_data_size += 8; break; default: break; } } if (kind == type_enum && !_metadata->schema[i].symbols.size()) { kind = type_int; } schema_desc[i].kind = kind; schema_desc[i].count = (kind == type_enum) ?
0 : (uint32_t)_metadata->schema[i].num_children; schema_desc[i].dataptr = nullptr; CUDF_EXPECTS(kind != type_union || _metadata->schema[i].num_children < 2 || (_metadata->schema[i].num_children == 2 && (_metadata->schema[i + 1].kind == type_null || _metadata->schema[i + 2].kind == type_null)), "Union with non-null type not currently supported"); } std::vector<void *> valid_alias(out_buffers.size(), nullptr); for (size_t i = 0; i < out_buffers.size(); i++) { const auto col_idx = selection[i].first; int schema_data_idx = _metadata->columns[col_idx].schema_data_idx; int schema_null_idx = _metadata->columns[col_idx].schema_null_idx; schema_desc[schema_data_idx].dataptr = out_buffers[i].data(); if (schema_null_idx >= 0) { if (!schema_desc[schema_null_idx].dataptr) { schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask(); } else { valid_alias[i] = schema_desc[schema_null_idx].dataptr; } } if (_metadata->schema[schema_data_idx].kind == type_enum) { schema_desc[schema_data_idx].count = dict[i].first; } if (out_buffers[i].null_mask_size()) { set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream); } } rmm::device_buffer block_list( _metadata->block_list.data(), _metadata->block_list.size() * sizeof(block_desc_s), stream); CUDA_TRY(hipMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(), schema_desc.memory_size(), hipMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodeAvroColumnData( static_cast<block_desc_s *>(block_list.data()), schema_desc.device_ptr(), reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()), static_cast<const uint8_t *>(block_data.data()), static_cast<uint32_t>(_metadata->block_list.size()), static_cast<uint32_t>(schema_desc.size()), static_cast<uint32_t>(total_dictionary_entries), _metadata->num_rows, _metadata->skip_rows, min_row_data_size, stream)); // Copy valid bits that are shared between columns for (size_t i = 0; i < out_buffers.size(); i++) { if (valid_alias[i] != nullptr) { CUDA_TRY(hipMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i], out_buffers[i].null_mask_size(), hipMemcpyHostToDevice, stream)); } } CUDA_TRY(hipMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(), schema_desc.memory_size(), hipMemcpyDeviceToHost, stream)); CUDA_TRY(hipStreamSynchronize(stream)); for (size_t i = 0; i < out_buffers.size(); i++) { const auto col_idx = selection[i].first; const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx; out_buffers[i].null_count() = (schema_null_idx >= 0) ? 
schema_desc[schema_null_idx].count : 0; } } reader::impl::impl(std::unique_ptr<datasource> source, reader_options const &options, rmm::mr::device_memory_resource *mr) : _source(std::move(source)), _mr(mr), _columns(options.columns) { // Open the source Avro dataset metadata _metadata = std::make_unique<metadata>(_source.get()); } table_with_metadata reader::impl::read(int skip_rows, int num_rows, hipStream_t stream) { std::vector<std::unique_ptr<column>> out_columns; table_metadata metadata_out; // Select and read partial metadata / schema within the subset of rows _metadata->init_and_select_rows(skip_rows, num_rows); // Select only columns required by the options auto selected_columns = _metadata->select_columns(_columns); if (selected_columns.size() != 0) { // Get a list of column data types std::vector<data_type> column_types; for (const auto &col : selected_columns) { auto &col_schema = _metadata->schema[_metadata->columns[col.first].schema_data_idx]; auto col_type = to_type_id(&col_schema); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } if (_metadata->total_data_size > 0) { const auto buffer = _source->get_buffer(_metadata->block_list[0].offset, _metadata->total_data_size); rmm::device_buffer block_data(buffer->data(), buffer->size(), stream); if (_metadata->codec != "" && _metadata->codec != "null") { auto decomp_block_data = decompress_data(block_data, stream); block_data = std::move(decomp_block_data); } else { auto dst_ofs = _metadata->block_list[0].offset; for (size_t i = 0; i < _metadata->block_list.size(); i++) { _metadata->block_list[i].offset -= dst_ofs; } } size_t total_dictionary_entries = 0; size_t dictionary_data_size = 0; std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx]; dict[i].first = static_cast<uint32_t>(total_dictionary_entries); dict[i].second = static_cast<uint32_t>(col_schema.symbols.size()); total_dictionary_entries += dict[i].second; for (const auto &sym : col_schema.symbols) { dictionary_data_size += sym.length(); } } hostdevice_vector<uint8_t> global_dictionary( total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size); if (total_dictionary_entries > 0) { size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx]; auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>( global_dictionary.host_ptr()))[dict[i].first]; for (size_t j = 0; j < dict[i].second; j++) { size_t len = col_schema.symbols[j].length(); char *ptr = reinterpret_cast<char *>( global_dictionary.device_ptr() + dict_pos); index[j].ptr = ptr; index[j].count = len; memcpy(global_dictionary.host_ptr() + dict_pos, col_schema.symbols[j].c_str(), len); dict_pos += len; } } CUDA_TRY(hipMemcpyAsync( global_dictionary.device_ptr(), global_dictionary.host_ptr(), global_dictionary.memory_size(), hipMemcpyHostToDevice, stream)); } std::vector<column_buffer> out_buffers; for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; bool is_nullable = (_metadata->columns[col_idx].schema_null_idx >= 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr); } decode_data(block_data, dict, global_dictionary, 
total_dictionary_entries, num_rows, selected_columns, out_buffers, stream); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(column_types[i], num_rows, out_buffers[i], stream, _mr)); } } } // Return column names (must match order of returned columns) metadata_out.column_names.resize(selected_columns.size()); for (size_t i = 0; i < selected_columns.size(); i++) { metadata_out.column_names[i] = selected_columns[i].second; } // Return user metadata metadata_out.user_data = _metadata->user_data; return { std::make_unique<table>(std::move(out_columns)), std::move(metadata_out) }; } // Forward to implementation reader::reader(std::string filepath, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) { } // Forward to implementation reader::reader(const char *buffer, size_t length, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(buffer, length), options, mr)) {} // Forward to implementation reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {} // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read_all(hipStream_t stream) { return _impl->read(0, -1, stream); } // Forward to implementation table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, hipStream_t stream) { return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream); } } // namespace avro } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
525e8c5659266ed7dd3a23bc27e31a890ff7e6a5.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO Avro reader class implementation **/ #include "reader_impl.hpp" #include <io/comp/gpuinflate.h> #include <cudf/table/table.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <rmm/thrust_rmm_allocator.h> #include <rmm/device_buffer.hpp> namespace cudf { namespace experimental { namespace io { namespace detail { namespace avro { // Import functionality that's independent of legacy code using namespace cudf::io::avro; using namespace cudf::io; namespace { /** * @brief Function that translates Avro data kind to cuDF type enum **/ type_id to_type_id(const avro::schema_entry *col) { switch (col->kind) { case avro::type_boolean: return type_id::BOOL8; case avro::type_int: return type_id::INT32; case avro::type_long: return type_id::INT64; case avro::type_float: return type_id::FLOAT32; case avro::type_double: return type_id::FLOAT64; case avro::type_bytes: case avro::type_string: return type_id::STRING; case avro::type_enum: return (!col->symbols.empty()) ? type_id::STRING : type_id::INT32; default: return type_id::EMPTY; } } } // namespace /** * @brief A helper wrapper for Avro file metadata. Provides some additional * convenience methods for initializing and accessing the metadata and schema **/ class metadata : public file_metadata { public: explicit metadata(datasource *const src) : source(src) {} /** * @brief Initializes the parser and filters down to a subset of rows * * @param[in,out] row_start Starting row of the selection * @param[in,out] row_count Total number of rows selected **/ void init_and_select_rows(int &row_start, int &row_count) { const auto buffer = source->get_buffer(0, source->size()); avro::container pod(buffer->data(), buffer->size()); CUDF_EXPECTS(pod.parse(this, row_count, row_start), "Cannot parse metadata"); row_start = skip_rows; row_count = num_rows; } /** * @brief Filters and reduces down to a selection of columns * * @param[in] use_names List of column names to select * * @return List of column names **/ auto select_columns(std::vector<std::string> use_names) { std::vector<std::pair<int, std::string>> selection; const auto num_avro_columns = static_cast<int>(columns.size()); if (!use_names.empty()) { int index = 0; for (const auto &use_name : use_names) { for (int i = 0; i < num_avro_columns; ++i, ++index) { if (index >= num_avro_columns) { index = 0; } if (columns[index].name == use_name && type_id::EMPTY != to_type_id(&schema[columns[index].schema_data_idx])) { selection.emplace_back(index, columns[index].name); index++; break; } } } } else { for (int i = 0; i < num_avro_columns; ++i) { auto col_type = to_type_id(&schema[columns[i].schema_data_idx]); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unsupported data type"); selection.emplace_back(i, columns[i].name); } } CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns"); return selection; } private: datasource *const source; }; rmm::device_buffer 
reader::impl::decompress_data( const rmm::device_buffer &comp_block_data, cudaStream_t stream) { size_t uncompressed_data_size = 0; hostdevice_vector<gpu_inflate_input_s> inflate_in( _metadata->block_list.size()); hostdevice_vector<gpu_inflate_status_s> inflate_out( _metadata->block_list.size()); if (_metadata->codec == "deflate") { // Guess an initial maximum uncompressed block size uint32_t initial_blk_len = (_metadata->max_block_size * 2 + 0xfff) & ~0xfff; uncompressed_data_size = initial_blk_len * _metadata->block_list.size(); for (size_t i = 0; i < inflate_in.size(); ++i) { inflate_in[i].dstSize = initial_blk_len; } } else if (_metadata->codec == "snappy") { // Extract the uncompressed length from the snappy stream for (size_t i = 0; i < _metadata->block_list.size(); i++) { const auto buffer = _source->get_buffer(_metadata->block_list[i].offset, 4); const uint8_t *blk = buffer->data(); uint32_t blk_len = blk[0]; if (blk_len > 0x7f) { blk_len = (blk_len & 0x7f) | (blk[1] << 7); if (blk_len > 0x3fff) { blk_len = (blk_len & 0x3fff) | (blk[2] << 14); if (blk_len > 0x1fffff) { blk_len = (blk_len & 0x1fffff) | (blk[3] << 21); } } } inflate_in[i].dstSize = blk_len; uncompressed_data_size += blk_len; } } else { CUDF_FAIL("Unsupported compression codec\n"); } rmm::device_buffer decomp_block_data(uncompressed_data_size, stream); const auto base_offset = _metadata->block_list[0].offset; for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) { const auto src_pos = _metadata->block_list[i].offset - base_offset; inflate_in[i].srcDevice = static_cast<const uint8_t *>(comp_block_data.data()) + src_pos; inflate_in[i].srcSize = _metadata->block_list[i].size; inflate_in[i].dstDevice = static_cast<uint8_t *>(decomp_block_data.data()) + dst_pos; // Update block offsets & sizes to refer to uncompressed data _metadata->block_list[i].offset = dst_pos; _metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += _metadata->block_list[i].size; } for (int loop_cnt = 0; loop_cnt < 2; loop_cnt++) { CUDA_TRY(cudaMemcpyAsync(inflate_in.device_ptr(), inflate_in.host_ptr(), inflate_in.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(cudaMemsetAsync(inflate_out.device_ptr(), 0, inflate_out.memory_size(), stream)); if (_metadata->codec == "deflate") { CUDA_TRY(gpuinflate(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), 0, stream)); } else if (_metadata->codec == "snappy") { CUDA_TRY(gpu_unsnap(inflate_in.device_ptr(), inflate_out.device_ptr(), inflate_in.size(), stream)); } else { CUDF_FAIL("Unsupported compression codec\n"); } CUDA_TRY(cudaMemcpyAsync(inflate_out.host_ptr(), inflate_out.device_ptr(), inflate_out.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); // Check if larger output is required, as it's not known ahead of time if (_metadata->codec == "deflate" && !loop_cnt) { size_t actual_uncompressed_size = 0; for (size_t i = 0; i < _metadata->block_list.size(); i++) { // If error status is 1 (buffer too small), the `bytes_written` field // actually contains the uncompressed data size if (inflate_out[i].status == 1 && inflate_out[i].bytes_written > inflate_in[i].dstSize) { inflate_in[i].dstSize = inflate_out[i].bytes_written; } actual_uncompressed_size += inflate_in[i].dstSize; } if (actual_uncompressed_size > uncompressed_data_size) { decomp_block_data.resize(actual_uncompressed_size); for (size_t i = 0, dst_pos = 0; i < _metadata->block_list.size(); i++) { auto dst_base = static_cast<uint8_t
*>(decomp_block_data.data()); inflate_in[i].dstDevice = dst_base + dst_pos; _metadata->block_list[i].offset = dst_pos; _metadata->block_list[i].size = static_cast<uint32_t>(inflate_in[i].dstSize); dst_pos += _metadata->block_list[i].size; } } else { break; } } else { break; } } return decomp_block_data; } void reader::impl::decode_data( const rmm::device_buffer &block_data, const std::vector<std::pair<uint32_t, uint32_t>> &dict, const hostdevice_vector<uint8_t> &global_dictionary, size_t total_dictionary_entries, size_t num_rows, std::vector<std::pair<int, std::string>> selection, std::vector<column_buffer> &out_buffers, cudaStream_t stream) { // Build gpu schema hostdevice_vector<gpu::schemadesc_s> schema_desc(_metadata->schema.size()); uint32_t min_row_data_size = 0; int skip_field_cnt = 0; for (size_t i = 0; i < _metadata->schema.size(); i++) { type_kind_e kind = _metadata->schema[i].kind; if (skip_field_cnt != 0) { // Exclude union members from min_row_data_size skip_field_cnt += _metadata->schema[i].num_children - 1; } else { switch (kind) { case type_union: skip_field_cnt = _metadata->schema[i].num_children; // fall through case type_boolean: case type_int: case type_long: case type_bytes: case type_string: case type_enum: min_row_data_size += 1; break; case type_float: min_row_data_size += 4; break; case type_double: min_row_data_size += 8; break; default: break; } } if (kind == type_enum && !_metadata->schema[i].symbols.size()) { kind = type_int; } schema_desc[i].kind = kind; schema_desc[i].count = (kind == type_enum) ? 0 : (uint32_t)_metadata->schema[i].num_children; schema_desc[i].dataptr = nullptr; CUDF_EXPECTS(kind != type_union || _metadata->schema[i].num_children < 2 || (_metadata->schema[i].num_children == 2 && (_metadata->schema[i + 1].kind == type_null || _metadata->schema[i + 2].kind == type_null)), "Union with non-null type not currently supported"); } std::vector<void *> valid_alias(out_buffers.size(), nullptr); for (size_t i = 0; i < out_buffers.size(); i++) { const auto col_idx = selection[i].first; int schema_data_idx = _metadata->columns[col_idx].schema_data_idx; int schema_null_idx = _metadata->columns[col_idx].schema_null_idx; schema_desc[schema_data_idx].dataptr = out_buffers[i].data(); if (schema_null_idx >= 0) { if (!schema_desc[schema_null_idx].dataptr) { schema_desc[schema_null_idx].dataptr = out_buffers[i].null_mask(); } else { valid_alias[i] = schema_desc[schema_null_idx].dataptr; } } if (_metadata->schema[schema_data_idx].kind == type_enum) { schema_desc[schema_data_idx].count = dict[i].first; } if (out_buffers[i].null_mask_size()) { set_null_mask(out_buffers[i].null_mask(), 0, num_rows, true, stream); } } rmm::device_buffer block_list( _metadata->block_list.data(), _metadata->block_list.size() * sizeof(block_desc_s), stream); CUDA_TRY(cudaMemcpyAsync(schema_desc.device_ptr(), schema_desc.host_ptr(), schema_desc.memory_size(), cudaMemcpyHostToDevice, stream)); CUDA_TRY(gpu::DecodeAvroColumnData( static_cast<block_desc_s *>(block_list.data()), schema_desc.device_ptr(), reinterpret_cast<gpu::nvstrdesc_s *>(global_dictionary.device_ptr()), static_cast<const uint8_t *>(block_data.data()), static_cast<uint32_t>(_metadata->block_list.size()), static_cast<uint32_t>(schema_desc.size()), static_cast<uint32_t>(total_dictionary_entries), _metadata->num_rows, _metadata->skip_rows, min_row_data_size, stream)); // Copy valid bits that are shared between columns for (size_t i = 0; i < out_buffers.size(); i++) { if (valid_alias[i] != nullptr) { 
CUDA_TRY(cudaMemcpyAsync(out_buffers[i].null_mask(), valid_alias[i], out_buffers[i].null_mask_size(), cudaMemcpyHostToDevice, stream)); } } CUDA_TRY(cudaMemcpyAsync(schema_desc.host_ptr(), schema_desc.device_ptr(), schema_desc.memory_size(), cudaMemcpyDeviceToHost, stream)); CUDA_TRY(cudaStreamSynchronize(stream)); for (size_t i = 0; i < out_buffers.size(); i++) { const auto col_idx = selection[i].first; const auto schema_null_idx = _metadata->columns[col_idx].schema_null_idx; out_buffers[i].null_count() = (schema_null_idx >= 0) ? schema_desc[schema_null_idx].count : 0; } } reader::impl::impl(std::unique_ptr<datasource> source, reader_options const &options, rmm::mr::device_memory_resource *mr) : _source(std::move(source)), _mr(mr), _columns(options.columns) { // Open the source Avro dataset metadata _metadata = std::make_unique<metadata>(_source.get()); } table_with_metadata reader::impl::read(int skip_rows, int num_rows, cudaStream_t stream) { std::vector<std::unique_ptr<column>> out_columns; table_metadata metadata_out; // Select and read partial metadata / schema within the subset of rows _metadata->init_and_select_rows(skip_rows, num_rows); // Select only columns required by the options auto selected_columns = _metadata->select_columns(_columns); if (selected_columns.size() != 0) { // Get a list of column data types std::vector<data_type> column_types; for (const auto &col : selected_columns) { auto &col_schema = _metadata->schema[_metadata->columns[col.first].schema_data_idx]; auto col_type = to_type_id(&col_schema); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); column_types.emplace_back(col_type); } if (_metadata->total_data_size > 0) { const auto buffer = _source->get_buffer(_metadata->block_list[0].offset, _metadata->total_data_size); rmm::device_buffer block_data(buffer->data(), buffer->size(), stream); if (_metadata->codec != "" && _metadata->codec != "null") { auto decomp_block_data = decompress_data(block_data, stream); block_data = std::move(decomp_block_data); } else { auto dst_ofs = _metadata->block_list[0].offset; for (size_t i = 0; i < _metadata->block_list.size(); i++) { _metadata->block_list[i].offset -= dst_ofs; } } size_t total_dictionary_entries = 0; size_t dictionary_data_size = 0; std::vector<std::pair<uint32_t, uint32_t>> dict(column_types.size()); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx]; dict[i].first = static_cast<uint32_t>(total_dictionary_entries); dict[i].second = static_cast<uint32_t>(col_schema.symbols.size()); total_dictionary_entries += dict[i].second; for (const auto &sym : col_schema.symbols) { dictionary_data_size += sym.length(); } } hostdevice_vector<uint8_t> global_dictionary( total_dictionary_entries * sizeof(gpu::nvstrdesc_s) + dictionary_data_size); if (total_dictionary_entries > 0) { size_t dict_pos = total_dictionary_entries * sizeof(gpu::nvstrdesc_s); for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; auto &col_schema = _metadata->schema[_metadata->columns[col_idx].schema_data_idx]; auto index = &(reinterpret_cast<gpu::nvstrdesc_s *>( global_dictionary.host_ptr()))[dict[i].first]; for (size_t j = 0; j < dict[i].second; j++) { size_t len = col_schema.symbols[j].length(); char *ptr = reinterpret_cast<char *>( global_dictionary.device_ptr() + dict_pos); index[j].ptr = ptr; index[j].count = len; memcpy(global_dictionary.host_ptr() + dict_pos, 
col_schema.symbols[j].c_str(), len); dict_pos += len; } } CUDA_TRY(cudaMemcpyAsync( global_dictionary.device_ptr(), global_dictionary.host_ptr(), global_dictionary.memory_size(), cudaMemcpyHostToDevice, stream)); } std::vector<column_buffer> out_buffers; for (size_t i = 0; i < column_types.size(); ++i) { auto col_idx = selected_columns[i].first; bool is_nullable = (_metadata->columns[col_idx].schema_null_idx >= 0); out_buffers.emplace_back(column_types[i], num_rows, is_nullable, stream, _mr); } decode_data(block_data, dict, global_dictionary, total_dictionary_entries, num_rows, selected_columns, out_buffers, stream); for (size_t i = 0; i < column_types.size(); ++i) { out_columns.emplace_back(make_column(column_types[i], num_rows, out_buffers[i], stream, _mr)); } } } // Return column names (must match order of returned columns) metadata_out.column_names.resize(selected_columns.size()); for (size_t i = 0; i < selected_columns.size(); i++) { metadata_out.column_names[i] = selected_columns[i].second; } // Return user metadata metadata_out.user_data = _metadata->user_data; return { std::make_unique<table>(std::move(out_columns)), std::move(metadata_out) }; } // Forward to implementation reader::reader(std::string filepath, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(filepath), options, mr)) { } // Forward to implementation reader::reader(const char *buffer, size_t length, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(buffer, length), options, mr)) {} // Forward to implementation reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file, reader_options const &options, rmm::mr::device_memory_resource *mr) : _impl(std::make_unique<impl>(datasource::create(file), options, mr)) {} // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read_all(cudaStream_t stream) { return _impl->read(0, -1, stream); } // Forward to implementation table_with_metadata reader::read_rows(size_type skip_rows, size_type num_rows, cudaStream_t stream) { return _impl->read(skip_rows, (num_rows != 0) ? num_rows : -1, stream); } } // namespace avro } // namespace detail } // namespace io } // namespace experimental } // namespace cudf
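One detail of decompress_data above worth isolating: the snappy branch sizes each output block by hand-decoding the varint that prefixes every snappy stream, seven payload bits per byte with the high bit acting as a continuation flag, capped at four bytes. Below is a standalone sketch of that same decode; the function name is an assumption, and it presumes at least four readable bytes at blk, just as the reader reads a 4-byte window.

#include <cstdint>

static uint32_t snappy_uncompressed_length(const uint8_t *blk)
{
	uint32_t len = blk[0];
	if (len > 0x7f) {             // blk[0] had its continuation bit set
		len = (len & 0x7f) | (blk[1] << 7);
		if (len > 0x3fff) {         // blk[1] had its continuation bit set
			len = (len & 0x3fff) | (blk[2] << 14);
			if (len > 0x1fffff) {     // blk[2] had its continuation bit set
				len = (len & 0x1fffff) | (blk[3] << 21);
			}
		}
	}
	return len;
}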
0a29702c63ee1a7cbcd568f77f2a90ecc8ee792f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../Headers/Includes.cuh" /////////////// General GPU Functions /////////////// __device__ void D_unit_vector(float *start, float *stop, float *vec){ // Gives the unit vector which points between two locations float magsq = 0; for (unsigned i = 0; i < 3; i++) { vec[i] = stop[i] - start[i]; magsq += vec[i] * vec[i]; } for (unsigned i = 0; i < 3; i++) { vec[i] = vec[i] * rsqrtf(magsq); } } __device__ float D_distance_between (float *start , float *stop){ float magsq = 0; for (unsigned i = 0; i < 3; i++) { magsq += (stop[i] - start[i]) * (stop[i] - start[i]); } return sqrtf(magsq); } __device__ float D_angle_between (float *A, float *B){ // Given 2 unit vectors it returns the angle between them in radians return fabsf( acosf( A[0]*B[0] + A[1]*B[1] + A[2]*B[2] ) ); } /////////////// Functions for Double Integration /////////////// __device__ float D_gaussian ( float x, float mean, float sigma ){ return expf( -(x-mean)*(x-mean)/(2*sigma*sigma) ); } __device__ float D_gaussian_double_integrate ( float E1, float E2, float sigma, float alpha, float gamma, unsigned INTSTEP ){ float MeCsq = 0.5109989461; float E1_min = fmaxf(0, E1-3*sigma); float E1_max = E1+3*sigma; float E1_step = (E1_max-E1_min)/INTSTEP; float integral = 0; for (unsigned i = 0; i < INTSTEP; i++) { // For a given e1 value float e1 = E1_min + i * E1_step; // We find the limits of E2 to keep the influence inside the voxel float E2_min = - (e1/2) + ( sqrtf(e1) * sqrtf( e1 + 4*MeCsq - e1*cosf(alpha+gamma) ) ) / ( 2*sqrtf( 1 - cosf(alpha+gamma) ) ); float E2_max = - (e1/2) + ( sqrtf(e1) * sqrtf( e1 + 4*MeCsq - e1*cosf(alpha-gamma) ) ) / ( 2*sqrtf( 1 - cosf(alpha-gamma) ) ); float E2_step = (E2_max-E2_min)/INTSTEP; float base = E1_step*E2_step; for (unsigned j = 0; j < INTSTEP; j++) { float e2 = E2_min + j * E2_step; if ( fabsf( 1.0 + MeCsq * ( 1.0/(e1+e2) - 1.0/(e2) ) ) >= 1.0 ) { continue; } float theta1 = acosf( 1.0 + MeCsq * ( 1.0/(e1+e2) - 1.0/(e2) ) ); if ( theta1<alpha-gamma || theta1>alpha+gamma ){ continue; } integral += D_gaussian(e1, E1, sigma) * D_gaussian(e2, E2, sigma) * sinf(theta1) * base; } } return integral; } __global__ void Find_Intersecting( float *conelist_1D_d, unsigned char *voxel_cone_interaction_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES, float delx, float dely, float delz, float x_start, float y_start, float z_start, unsigned INTSTEP ){ unsigned J = threadIdx.x + blockIdx.x * blockDim.x; // The identity of the cone that we are working on float amp_max = 0; float theta = conelist_1D_d [ 6 + J * 11 ]; // The scattering angle float sigma = conelist_1D_d [ 7 + J * 11 ]; // Scattering angle uncertainty float kn = conelist_1D_d [ 8 + J * 11 ]; // First part of the Klein-Nishina coefficient float E1 = conelist_1D_d [ 9 + J * 11 ]; // First energy deposition float E2 = conelist_1D_d [ 10 + J * 11 ]; // Second energy deposition for (unsigned run = 0; run < 2; run++) { for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { float voxel_center[3] = { x_start + delx * (float)(i+0.5) , y_start + dely * (float)(j+0.5) , z_start + delz * (float)(k+0.5) }; float line_between[3]{}; D_unit_vector( &conelist_1D_d [ 0 + J * 11 ] , voxel_center , line_between ); float alpha = D_angle_between ( &conelist_1D_d [ 3 + J * 11 ] , line_between ); // The angle from the cone axis to the centre of the voxel float R = D_distance_between( &conelist_1D_d [ 0 +
J * 11 ] , voxel_center ); // The distance from the cone apex to the centre of the voxel float gamma = fabsf ( asinf ( delx / ( 2 * R ) ) ); // The angular radius of the voxel for the theta direction if ( fminf ( fabsf(theta-alpha+gamma) , fabsf(theta-alpha-gamma) ) < 3*sigma || (alpha-gamma-theta)*(alpha+gamma-theta) < 0 ) { // if the voxel is close enough to the gaussian of the cone, then we integrate float delta = fabsf ( asinf ( delx / ( 2 * R * sinf(alpha) ) ) ); // The angular radius of the voxel for the phi direction float integral = D_gaussian_double_integrate( E1, E2, 0.1, alpha, gamma, INTSTEP ); float term = ( ( R + delx/2 )*( R + delx/2 )*( R + delx/2 )/3 - ( R - delx/2 )*( R - delx/2 )*( R - delx/2 )/3 )*2*delta; float final_value = term * integral * ( kn - sinf(alpha)*sinf(alpha) ); if ( run == 0 && final_value > amp_max ) amp_max = final_value; if ( run == 1 ) voxel_cone_interaction_d[J + i*CONES + j*XDIVI*CONES + k*XDIVI*YDIVI*CONES] = __float2int_rd(255*final_value/amp_max+0.5); // Stores the integer value into the GPU global memory } } } } } } /////////////// GPU functions for Iteration /////////////// __global__ void Find_Max( float *f_d, float *voxel_max_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, unsigned ITHREADSPB ){ // In this kernel we only have a single block. unsigned index = threadIdx.x; extern __shared__ float cache[]; unsigned offset = 0; float temp_max = 0.0; while ( index + offset < (XDIVI*YDIVI*ZDIVI) ) { temp_max = fmaxf ( temp_max , f_d[index + offset] ); offset += ITHREADSPB; } cache[index] = temp_max; __syncthreads(); // Only the first thread will then look for the maximum within the block if ( index == 0 ) { float block_max = 0.0; for ( unsigned i = 0; i < ITHREADSPB; i++ ) { block_max = fmaxf ( block_max , cache[i] ); } *voxel_max_d = block_max; } } __global__ void Interior_Sum( float *f_d, unsigned char *voxel_cone_interaction_d, float *lambda_vector_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES ){ unsigned J = threadIdx.x + blockIdx.x * blockDim.x; // The identity of the cone that we are working on float sum = 0; for (unsigned voxel = 0; voxel < XDIVI*YDIVI*ZDIVI; voxel++) { // Now we iterate through all the voxels if( f_d[voxel]!=0 && voxel_cone_interaction_d[J + voxel*CONES] != 0 ){ // Which touched the current cone and is alive sum += voxel_cone_interaction_d[J + voxel*CONES] * f_d[voxel]; } } lambda_vector_d[J] = sum; } __global__ void Iterate( float *f_d, unsigned char *voxel_cone_interaction_d, float *lambda_vector_d, long unsigned CONES ){ unsigned voxID = threadIdx.x + blockIdx.x * blockDim.x; // The voxel this thread is currently working on if (f_d[voxID]!=0) { // Exclude all voxels where f is already 0, as it can never increase again float first_sum = 0; for (unsigned C = 0; C < CONES; C++){ // Now we iterate through all the cones if( voxel_cone_interaction_d[C + voxID*CONES] != 0 ){ // Which touched the current voxel first_sum += (float)(voxel_cone_interaction_d[C + voxID*CONES]) / lambda_vector_d[C]; } } f_d[voxID] *= first_sum; } } __global__ void Cull( float *f_d, unsigned char *voxel_cone_interaction_d, float *voxel_max_d, long unsigned CONES, float CUTOFF ){ unsigned trID = threadIdx.x + blockIdx.x * blockDim.x; if ( (f_d[ trID ] > 0) && (f_d[ trID ] < *voxel_max_d * CUTOFF) ) { // If the f value of a voxel gets too small f_d[ trID ] = 0; // We set the value straight to zero for (unsigned C = 0; C < CONES; C++){ // We also declare the voxel as dead voxel_cone_interaction_d[C + trID * CONES] = 0; } } } //
0a29702c63ee1a7cbcd568f77f2a90ecc8ee792f.cu
#include "../Headers/Includes.cuh" /////////////// General GPU Functions /////////////// __device__ void D_unit_vector(float *start, float *stop, float *vec){ // Gives the unit vector which points between two locations float magsq = 0; for (unsigned i = 0; i < 3; i++) { vec[i] = stop[i] - start[i]; magsq += vec[i] * vec[i]; } for (unsigned i = 0; i < 3; i++) { vec[i] = vec[i] * rsqrtf(magsq); } } __device__ float D_distance_between (float *start , float *stop){ float magsq = 0; for (unsigned i = 0; i < 3; i++) { magsq += (stop[i] - start[i]) * (stop[i] - start[i]); } return sqrtf(magsq); } __device__ float D_angle_between (float *A, float *B){ // Given 2 unit vectors it returns the angle between them in radians return fabsf( acosf( A[0]*B[0] + A[1]*B[1] + A[2]*B[2] ) ); } /////////////// Functions for Double Integration /////////////// __device__ float D_gaussian ( float x, float mean, float sigma ){ return expf( -(x-mean)*(x-mean)/(2*sigma*sigma) ); } __device__ float D_gaussian_double_integrate ( float E1, float E2, float sigma, float alpha, float gamma, unsigned INTSTEP ){ float MeCsq = 0.5109989461; float E1_min = fmaxf(0, E1-3*sigma); float E1_max = E1+3*sigma; float E1_step = (E1_max-E1_min)/INTSTEP; float integral = 0; for (unsigned i = 0; i < INTSTEP; i++) { // For a given e1 value float e1 = E1_min + i * E1_step; // We find the limits of E2 to keep the influence inside the voxel float E2_min = - (e1/2) + ( sqrtf(e1) * sqrtf( e1 + 4*MeCsq - e1*cosf(alpha+gamma) ) ) / ( 2*sqrtf( 1 - cosf(alpha+gamma) ) ); float E2_max = - (e1/2) + ( sqrtf(e1) * sqrtf( e1 + 4*MeCsq - e1*cosf(alpha-gamma) ) ) / ( 2*sqrtf( 1 - cosf(alpha-gamma) ) ); float E2_step = (E2_max-E2_min)/INTSTEP; float base = E1_step*E2_step; for (unsigned j = 0; j < INTSTEP; j++) { float e2 = E2_min + j * E2_step; if ( fabsf( 1.0 + MeCsq * ( 1.0/(e1+e2) - 1.0/(e2) ) ) >= 1.0 ) { continue; } float theta1 = acosf( 1.0 + MeCsq * ( 1.0/(e1+e2) - 1.0/(e2) ) ); if ( theta1<alpha-gamma || theta1>alpha+gamma ){ continue; } integral += D_gaussian(e1, E1, sigma) * D_gaussian(e2, E2, sigma) * sinf(theta1) * base; } } return integral; } __global__ void Find_Intersecting( float *conelist_1D_d, unsigned char *voxel_cone_interaction_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES, float delx, float dely, float delz, float x_start, float y_start, float z_start, unsigned INTSTEP ){ unsigned J = threadIdx.x + blockIdx.x * blockDim.x; // The identity of the cone that we are working on float amp_max = 0; float theta = conelist_1D_d [ 6 + J * 11 ]; // The scattering angle float sigma = conelist_1D_d [ 7 + J * 11 ]; // Scattering angle uncertainty float kn = conelist_1D_d [ 8 + J * 11 ]; // First part of the Klein-Nishina coefficient float E1 = conelist_1D_d [ 9 + J * 11 ]; // First energy deposition float E2 = conelist_1D_d [ 10 + J * 11 ]; // Second energy deposition for (unsigned run = 0; run < 2; run++) { for (unsigned i = 0; i < XDIVI; i++) { for (unsigned j = 0; j < YDIVI; j++) { for (unsigned k = 0; k < ZDIVI; k++) { float voxel_center[3] = { x_start + delx * (float)(i+0.5) , y_start + dely * (float)(j+0.5) , z_start + delz * (float)(k+0.5) }; float line_between[3]{}; D_unit_vector( &conelist_1D_d [ 0 + J * 11 ] , voxel_center , line_between ); float alpha = D_angle_between ( &conelist_1D_d [ 3 + J * 11 ] , line_between ); // The angle from the cone axis to the centre of the voxel float R = D_distance_between( &conelist_1D_d [ 0 + J * 11 ] , voxel_center ); // The distance from the cone apex to the centre of the voxel
float gamma = fabsf ( asinf ( delx / ( 2 * R ) ) ); // The angular radius of the voxel for the theta direction if ( fminf ( fabsf(theta-alpha+gamma) , fabsf(theta-alpha-gamma) ) < 3*sigma || (alpha-gamma-theta)*(alpha+gamma-theta) < 0 ) { // if the voxel is close enough to the gaussian of the cone, then we integrate float delta = fabsf ( asinf ( delx / ( 2 * R * sinf(alpha) ) ) ); // The angular radius of the voxel for the phi direction float integral = D_gaussian_double_integrate( E1, E2, 0.1, alpha, gamma, INTSTEP ); float term = ( ( R + delx/2 )*( R + delx/2 )*( R + delx/2 )/3 - ( R - delx/2 )*( R - delx/2 )*( R - delx/2 )/3 )*2*delta; float final_value = term * integral * ( kn - sinf(alpha)*sinf(alpha) ); if ( run == 0 && final_value > amp_max ) amp_max = final_value; if ( run == 1 ) voxel_cone_interaction_d[J + i*CONES + j*XDIVI*CONES + k*XDIVI*YDIVI*CONES] = __float2int_rd(255*final_value/amp_max+0.5); // Stores the integer value into the GPU global memory } } } } } } /////////////// GPU functions for Iteration /////////////// __global__ void Find_Max( float *f_d, float *voxel_max_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, unsigned ITHREADSPB ){ // In this kernel we only have a single block. unsigned index = threadIdx.x; extern __shared__ float cache[]; unsigned offset = 0; float temp_max = 0.0; while ( index + offset < (XDIVI*YDIVI*ZDIVI) ) { temp_max = fmaxf ( temp_max , f_d[index + offset] ); offset += ITHREADSPB; } cache[index] = temp_max; __syncthreads(); // Only the first thread will then look for the maximum within the block if ( index == 0 ) { float block_max = 0.0; for ( unsigned i = 0; i < ITHREADSPB; i++ ) { block_max = fmaxf ( block_max , cache[i] ); } *voxel_max_d = block_max; } } __global__ void Interior_Sum( float *f_d, unsigned char *voxel_cone_interaction_d, float *lambda_vector_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, long unsigned CONES ){ unsigned J = threadIdx.x + blockIdx.x * blockDim.x; // The identity of the cone that we are working on float sum = 0; for (unsigned voxel = 0; voxel < XDIVI*YDIVI*ZDIVI; voxel++) { // Now we iterate through all the voxels if( f_d[voxel]!=0 && voxel_cone_interaction_d[J + voxel*CONES] != 0 ){ // Which touched the current cone and is alive sum += voxel_cone_interaction_d[J + voxel*CONES] * f_d[voxel]; } } lambda_vector_d[J] = sum; } __global__ void Iterate( float *f_d, unsigned char *voxel_cone_interaction_d, float *lambda_vector_d, long unsigned CONES ){ unsigned voxID = threadIdx.x + blockIdx.x * blockDim.x; // The voxel this thread is currently working on if (f_d[voxID]!=0) { // Exclude all voxels where f is already 0, as it can never increase again float first_sum = 0; for (unsigned C = 0; C < CONES; C++){ // Now we iterate through all the cones if( voxel_cone_interaction_d[C + voxID*CONES] != 0 ){ // Which touched the current voxel first_sum += (float)(voxel_cone_interaction_d[C + voxID*CONES]) / lambda_vector_d[C]; } } f_d[voxID] *= first_sum; } } __global__ void Cull( float *f_d, unsigned char *voxel_cone_interaction_d, float *voxel_max_d, long unsigned CONES, float CUTOFF ){ unsigned trID = threadIdx.x + blockIdx.x * blockDim.x; if ( (f_d[ trID ] > 0) && (f_d[ trID ] < *voxel_max_d * CUTOFF) ) { // If the f value of a voxel gets too small f_d[ trID ] = 0; // We set the value straight to zero for (unsigned C = 0; C < CONES; C++){ // We also declare the voxel as dead voxel_cone_interaction_d[C + trID * CONES] = 0; } } } //
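A sketch of how a host loop might drive the iteration kernels above; this is an assumption, not code from the original source. mlem_step and THREADS are hypothetical names, THREADS is taken to divide both CONES and the voxel count exactly, and the device buffers are assumed to be sized as Find_Intersecting left them.

void mlem_step(float *f_d, unsigned char *voxel_cone_interaction_d, float *lambda_vector_d,
               float *voxel_max_d, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI,
               long unsigned CONES, float CUTOFF, unsigned THREADS)
{
	unsigned voxels = XDIVI * YDIVI * ZDIVI;
	// lambda_J = sum over live voxels of (interaction weight) * f, one thread per cone
	Interior_Sum<<<CONES / THREADS, THREADS>>>(f_d, voxel_cone_interaction_d, lambda_vector_d, XDIVI, YDIVI, ZDIVI, CONES);
	// multiplicative update: f *= sum over cones of weight / lambda, one thread per voxel
	Iterate<<<voxels / THREADS, THREADS>>>(f_d, voxel_cone_interaction_d, lambda_vector_d, CONES);
	// single-block max reduction; the dynamic shared cache holds THREADS floats
	Find_Max<<<1, THREADS, THREADS * sizeof(float)>>>(f_d, voxel_max_d, XDIVI, YDIVI, ZDIVI, THREADS);
	// zero out voxels that fell below CUTOFF of the maximum so later passes skip them
	Cull<<<voxels / THREADS, THREADS>>>(f_d, voxel_cone_interaction_d, voxel_max_d, CONES, CUTOFF);
	cudaDeviceSynchronize();
}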
d0ba8fa01ab5ca022694274c8a8572bf0ef4a949.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "stdafx.h" #include "activation.h" #include "cuda_tensor.h" #include <hip/hip_fp16.h> __global__ static void activate_float_kernel(const float* in, float* out, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { val = in[index]; switch (mode) { case LEAKY: if (val < 0.0f) out[index] = 0.1f * val; else out[index] = val; break; case LOGISTIC: out[index] = 1.0f / (1.0f + exp(-val)); //if (index < 13) // printf("in: %f, out : %f\n", val, out[index]); break; case RELU: if (val < 0.0f) out[index] = 0.0f; else out[index] = val; break; case HARDTAN: if (val < -1.0) out[index] = -1.0f; else if (val > 1.0f) out[index] = 1.0f; else out[index] = val; break; case LHTAN: if (val < 0.0f) out[index] = val * 0.001f; else if (val > 1.0f) out[index] = 0.001f * (val - 1.0f) + 1.0f; else out[index] = val; break; case TANH: val = exp(2.0f * val); out[index] = (val - 1.0f) / (val + 1.0f); break; case LOGGY: out[index] = 2.0f / (1.0f + exp(-val)); break; case ELU: if (val < 0.0f) out[index] = exp(val) - 1.0f; else out[index] = val; break; case LINEAR: out[index] = val; break; default: break; } index += threads; } } __global__ static void activate_half_kernel(const __half* in, __half* out, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; float fx; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { fx = __half2float(in[index]); switch (mode) { case LEAKY: if (fx < 0.0f) out[index] = __float2half(fx * 0.1); else out[index] = in[index]; break; case LOGISTIC: val = 1.0f / (1.0f + exp(-fx)); out[index] = __float2half(val); break; case RELU: if (fx < 0.0f) { out[index] = 0.0; } else out[index] = in[index]; break; case HARDTAN: if (fx < -1.0f) out[index] = __float2half(-1.0f); else if (fx > 1.0f) out[index] = __float2half(1.0f); else out[index] = in[index]; break; case LHTAN: if (fx < 0.0f) out[index] = __float2half(fx * 0.001f); else if (fx > 1.0f) out[index] = __float2half(0.001f * (fx - 1.0f) + 1.0f); else out[index] = in[index]; break; case TANH: val = exp(2.0f * fx); val = (val - 1.0f) / (val + 1.0f); out[index] = __float2half(val); break; case LOGGY: val = 2.0f / (1.0f + exp(-fx)); out[index] = __float2half(val); break; case ELU: if (fx < 0.0f) { out[index] = __float2half(exp(fx) - 1.0f); } else out[index] = in[index]; break; case LINEAR: out[index] = in[index]; break; default: break; } index += threads; } } // output is delta __global__ static void gradient_float_kernel(const float* y, float* dy, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { switch (mode) { case LEAKY: if (y[index] < 0.0) dy[index] *= 0.1f; break; case LOGISTIC: val = y[index] * (1.0 - y[index]); dy[index] *= val; break; case RELU: if (y[index] <= 0.0) dy[index] = 0.0; break; case HARDTAN: /* the derivative is 1 inside (-1,1), so dy passes through unchanged there */ if (y[index] <= -1.0 || y[index] >= 1.0) dy[index] = 0.0; break; case LHTAN: if (y[index] <= 0.0 || y[index] >= 1.0) dy[index] *= 0.001; break; case TANH: val = y[index] * y[index]; dy[index] *= (1.0 - val); break; case LOGGY: val = (y[index] + 1.0) * 0.5; dy[index] = 2.0 * (1 - val) * val * dy[index]; break; case ELU: if (y[index] < 0.0)
dy[index] *= (y[index] + 1.0); break; case LINEAR: default: break; } index += threads; } } #if 0 __global__ static void gradient_half_kernel(const __half* y, __half* dy, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; float fy; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { fy = __half2float(y[index]); switch (mode) { case LEAKY: if (fy < 0.0f) dy[index] = __hmul(dy[index] , __float2half(0.1f)); break; case LOGISTIC: val = fy * (1.0f - fy) * __half2float(dy[index]); dy[index] = __float2half(val); break; case RELU: if (fy <= 0.0f) dy[index] = __float2half(0.0f); break; case HARDTAN: if (fy > -1.0f && fy< 1.0f) dy[index] = __float2half(1.0f); else dy[index] = __float2half(0.0f); break; case LHTAN: if (fy <= 0.0f || fy >= 1.0) dy[index] = __hmul(dy[index], __float2half(0.001f)); break; case TANH: val = 1.0f - fy * fy; dy[index] = __hmul(dy[index], __float2half(val)); break; case LOGGY:{ val = (fy + 1.0f) * 0.5f; float temp = 2.0f * (1.0f - val) * val; dy[index] = __hmul(dy[index], __float2half(temp)); } break; case ELU: if (fy < 0.0f) { val = fy + 1.0f; dy[index] = __hmul(dy[index], __float2half(val)); } break; default: break; } index += threads; } } #endif bool gradient_array_ongpu(const void* y, void* delta, int elements, cudnnDataType_t data_type, ActivationMode mode) { if (mode == LINEAR) return true; int g = GPUGridSize(); int b = GPUBlockSize(); if (data_type == CUDNN_DATA_FLOAT) { const float* in = reinterpret_cast<const float*>(y); float* out = reinterpret_cast<float*>(delta); hipLaunchKernelGGL(( gradient_float_kernel) , dim3(g), dim3(b) , 0, 0, in, out, elements, data_type, mode); hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { cerr << "gradient_array_ongpu failed! elements:" << elements << ", err :" << (int)err << endl; return false; } } else if (data_type == CUDNN_DATA_HALF) { const __half* in = reinterpret_cast<const __half*>(y); CudaPtr<float> fmt_buf(elements); if (!f16_to_f32(fmt_buf, in, elements)) return false; if (y == delta) { hipLaunchKernelGGL(( gradient_float_kernel) , dim3(g), dim3(b) , 0, 0, fmt_buf.ptr, fmt_buf.ptr, elements, data_type, mode); } else { CudaPtr<float> fmt_buf2(elements); if (hipSuccess != hipMemcpy(fmt_buf2.ptr, fmt_buf.ptr, fmt_buf.Bytes(), hipMemcpyDeviceToDevice)) return false; gradient_float_kernel << <g, b >> > (fmt_buf2.ptr, fmt_buf.ptr, elements, data_type, mode); } hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { cerr << "gradient_array_ongpu failed! 
elements:" << elements << ", err :" << (int)err << endl; return false; } __half* out = reinterpret_cast<__half*>(delta); return f32_to_f16(out, fmt_buf.ptr, elements); } else { return false; } return true; } bool activate_array_ongpu(const void* x, void* y, int elements, cudnnDataType_t data_type, ActivationMode mode) { int g = GPUGridSize(); int b = GPUBlockSize(); if (data_type == CUDNN_DATA_FLOAT) { const float* in = reinterpret_cast<const float*>(x); float* out = reinterpret_cast<float*>(y); if (mode == LINEAR) return hipSuccess == hipMemcpy(out, in, elements * sizeof(float), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( activate_float_kernel) , dim3(g), dim3(b) , 0, 0, in, out, elements, data_type, mode); } else if (data_type == CUDNN_DATA_HALF) { const __half* in = reinterpret_cast<const __half*>(x); __half* out = reinterpret_cast<__half*>(y); if (mode == LINEAR) return hipSuccess == hipMemcpy(out, in, elements * sizeof(__half), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( activate_half_kernel) , dim3(g), dim3(b) , 0, 0, in, out, elements, data_type, mode); } else { return false; } hipError_t err = hipDeviceSynchronize(); if (err != hipSuccess) { cerr << "activate_array_ongpu failed! elements:" << elements << ", err :" << (int)err << endl; return false; } return true; }
d0ba8fa01ab5ca022694274c8a8572bf0ef4a949.cu
#include "stdafx.h" #include "activation.h" #include "cuda_tensor.h" #include <cuda_fp16.h> __global__ static void activate_float_kernel(const float* in, float* out, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { val = in[index]; switch (mode) { case LEAKY: if (val < 0.0f) out[index] = 0.1f * val; else out[index] = val; break; case LOGISTIC: out[index] = 1.0f / (1.0f + exp(-val)); //if (index < 13) // printf("in: %f, out : %f\n", val, out[index]); break; case RELU: if (val < 0.0f) out[index] = 0.0f; else out[index] = val; break; case HARDTAN: if (val < -1.0) out[index] = -1.0f; else if (val > 1.0f) out[index] = 1.0f; else out[index] = val; break; case LHTAN: if (val < 0.0f) out[index] = val * 0.001f; else if (val > 1.0f) out[index] = 0.001f * (val - 1.0f) + 1.0f; else out[index] = val; break; case TANH: val = exp(2.0f * val); out[index] = (val - 1.0f) / (val + 1.0f); break; case LOGGY: out[index] = 2.0f / (1.0f + exp(-val)); break; case ELU: if (val < 0.0f) out[index] = exp(val) - 1.0f; else out[index] = val; break; case LINEAR: out[index] = val; break; default: break; } index += threads; } } __global__ static void activate_half_kernel(const __half* in, __half* out, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; float fx; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { fx = __half2float(in[index]); switch (mode) { case LEAKY: if (fx < 0.0f) out[index] = __float2half(fx * 0.1); else out[index] = in[index]; break; case LOGISTIC: val = 1.0f / (1.0f + exp(-fx)); out[index] = __float2half(val); break; case RELU: if (fx < 0.0f) { out[index] = 0.0; } else out[index] = in[index]; break; case HARDTAN: if (fx < -1.0f) out[index] = __float2half(-1.0f); else if (fx > 1.0f) out[index] = __float2half(1.0f); else out[index] = in[index]; break; case LHTAN: if (fx < 0.0f) out[index] = __float2half(fx * 0.001f); else if (fx > 1.0f) out[index] = __float2half(0.001f * (fx - 1.0f) + 1.0f); else out[index] = in[index]; break; case TANH: val = exp(2.0f * fx); val = (val - 1.0f) / (val + 1.0f); out[index] = __float2half(val); break; case LOGGY: val = 2.0f / (1.0f + exp(-fx)); out[index] = __float2half(val); break; case ELU: if (fx < 0.0f) { out[index] = __float2half(exp(fx) - 1.0f); } else out[index] = in[index]; break; case LINEAR: out[index] = in[index]; break; default: break; } index += threads; } } // output is delta __global__ static void gradient_float_kernel(const float* y, float* dy, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { switch (mode) { case LEAKY: if (y[index] < 0.0) dy[index] *= 0.1f; break; case LOGISTIC: val = y[index] * (1.0 - y[index]); dy[index] *= val; break; case RELU: if (y[index] <= 0.0) dy[index] = 0.0; break; case HARDTAN: /* the derivative is 1 inside (-1,1), so dy passes through unchanged there */ if (y[index] <= -1.0 || y[index] >= 1.0) dy[index] = 0.0; break; case LHTAN: if (y[index] <= 0.0 || y[index] >= 1.0) dy[index] *= 0.001; break; case TANH: val = y[index] * y[index]; dy[index] *= (1.0 - val); break; case LOGGY: val = (y[index] + 1.0) * 0.5; dy[index] = 2.0 * (1 - val) * val * dy[index]; break; case ELU: if (y[index] < 0.0) dy[index] *= (y[index] + 1.0); break; case LINEAR: default: break; } index += threads; } } #if 0
__global__ static void gradient_half_kernel(const __half* y, __half* dy, int elements, cudnnDataType_t data_type, ActivationMode mode) { float val; float fy; int index = blockDim.x * blockIdx.x + threadIdx.x; int threads = gridDim.x * blockDim.x; while (index < elements) { fy = __half2float(y[index]); switch (mode) { case LEAKY: if (fy < 0.0f) dy[index] = __hmul(dy[index] , __float2half(0.1f)); break; case LOGISTIC: val = fy * (1.0f - fy) * __half2float(dy[index]); dy[index] = __float2half(val); break; case RELU: if (fy <= 0.0f) dy[index] = __float2half(0.0f); break; case HARDTAN: if (fy > -1.0f && fy< 1.0f) dy[index] = __float2half(1.0f); else dy[index] = __float2half(0.0f); break; case LHTAN: if (fy <= 0.0f || fy >= 1.0) dy[index] = __hmul(dy[index], __float2half(0.001f)); break; case TANH: val = 1.0f - fy * fy; dy[index] = __hmul(dy[index], __float2half(val)); break; case LOGGY:{ val = (fy + 1.0f) * 0.5f; float temp = 2.0f * (1.0f - val) * val; dy[index] = __hmul(dy[index], __float2half(temp)); } break; case ELU: if (fy < 0.0f) { val = fy + 1.0f; dy[index] = __hmul(dy[index], __float2half(val)); } break; default: break; } index += threads; } } #endif bool gradient_array_ongpu(const void* y, void* delta, int elements, cudnnDataType_t data_type, ActivationMode mode) { if (mode == LINEAR) return true; int g = GPUGridSize(); int b = GPUBlockSize(); if (data_type == CUDNN_DATA_FLOAT) { const float* in = reinterpret_cast<const float*>(y); float* out = reinterpret_cast<float*>(delta); gradient_float_kernel <<<g, b >>> (in, out, elements, data_type, mode); cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { cerr << "gradient_array_ongpu failed! elements:" << elements << ", err :" << (int)err << endl; return false; } } else if (data_type == CUDNN_DATA_HALF) { const __half* in = reinterpret_cast<const __half*>(y); CudaPtr<float> fmt_buf(elements); if (!f16_to_f32(fmt_buf, in, elements)) return false; if (y == delta) { gradient_float_kernel <<<g, b >>> (fmt_buf.ptr, fmt_buf.ptr, elements, data_type, mode); } else { CudaPtr<float> fmt_buf2(elements); if (cudaSuccess != cudaMemcpy(fmt_buf2.ptr, fmt_buf.ptr, fmt_buf.Bytes(), cudaMemcpyDeviceToDevice)) return false; gradient_float_kernel << <g, b >> > (fmt_buf2.ptr, fmt_buf.ptr, elements, data_type, mode); } cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { cerr << "gradient_array_ongpu failed! 
elements:" << elements << ", err :" << (int)err << endl; return false; } __half* out = reinterpret_cast<__half*>(delta); return f32_to_f16(out, fmt_buf.ptr, elements); } else { return false; } return true; } bool activate_array_ongpu(const void* x, void* y, int elements, cudnnDataType_t data_type, ActivationMode mode) { int g = GPUGridSize(); int b = GPUBlockSize(); if (data_type == CUDNN_DATA_FLOAT) { const float* in = reinterpret_cast<const float*>(x); float* out = reinterpret_cast<float*>(y); if (mode == LINEAR) return cudaSuccess == cudaMemcpy(out, in, elements * sizeof(float), cudaMemcpyDeviceToDevice); activate_float_kernel <<<g, b >>> (in, out, elements, data_type, mode); } else if (data_type == CUDNN_DATA_HALF) { const __half* in = reinterpret_cast<const __half*>(x); __half* out = reinterpret_cast<__half*>(y); if (mode == LINEAR) return cudaSuccess == cudaMemcpy(out, in, elements * sizeof(__half), cudaMemcpyDeviceToDevice); activate_half_kernel <<<g, b >>> (in, out, elements, data_type, mode); } else { return false; } cudaError_t err = cudaDeviceSynchronize(); if (err != cudaSuccess) { cerr << "activate_array_ongpu failed! elements:" << elements << ", err :" << (int)err << endl; return false; } return true; }
b12bb77f890447b19458e48d2ca052249cdcc43b.hip
// !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>

#include <color_spinor_field.h>
#include <clover_field.h>

// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC

#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>

namespace quda {

  namespace ndegtwisted {

#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>

    // Enable shared memory dslash for Fermi architecture
    //#define SHARED_WILSON_DSLASH
    //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif

#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif

#include <dslash_quda.cuh>

  } // end namespace twisted

  // declare the dslash events
#include <dslash_events.cuh>

  using namespace ndegtwisted;

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
  template <typename sFloat, typename gFloat>
  class NdegTwistedDslashCuda : public SharedDslashCuda {

  private:
    const gFloat *gauge0, *gauge1;
    const QudaTwistDslashType dslashType;
    double a, b, c, d;

  protected:
    unsigned int sharedBytesPerThread() const
    {
      if (dslashParam.kernel_type == INTERIOR_KERNEL) {
        int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
        return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
      } else {
        return 0;
      }
    }

  public:
    NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
                          const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
                          const cudaColorSpinorField *x, const QudaTwistDslashType dslashType,
                          const double kappa, const double mu, const double epsilon,
                          const double k, const int dagger)
      : SharedDslashCuda(out, in, x, reconstruct, dagger),
        gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
    {
      bindSpinorTex<sFloat>(in, out, x);
      a = kappa;
      b = mu;
      c = epsilon;
      d = k;
      dslashParam.gauge0 = (void*)gauge0;
      dslashParam.gauge1 = (void*)gauge1;
      dslashParam.a = kappa;
      dslashParam.a_f = kappa;
      dslashParam.b = mu;
      dslashParam.b_f = mu;
      dslashParam.c = epsilon;
      dslashParam.c_f = epsilon;
      dslashParam.d = k;
      dslashParam.d_f = k;

      if (dslashType != QUDA_NONDEG_DSLASH)
        errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");

      dslashParam.fl_stride = in->VolumeCB()/2;
    }

    virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }

    TuneKey tuneKey() const
    {
      TuneKey key = DslashCuda::tuneKey();
      strcat(key.aux, ",NdegDslash");
      return key;
    }

    void apply(const hipStream_t &stream)
    {
      // factor of 2 (or 1) for T-dimensional spin projection (FIXME - unnecessary)
      dslashParam.tProjScale = getKernelPackT() ? 1.0 : 2.0;
      dslashParam.tProjScale_f = (float)(dslashParam.tProjScale);

#ifdef SHARED_WILSON_DSLASH
      if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
        errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
      TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
    }

    long long flops() const
    {
      int twisted_flops = 48;
      long long flops = DslashCuda::flops();
      switch (dslashParam.kernel_type) {
      case EXTERIOR_KERNEL_X:
      case EXTERIOR_KERNEL_Y:
      case EXTERIOR_KERNEL_Z:
      case EXTERIOR_KERNEL_T:
      case EXTERIOR_KERNEL_ALL:
        break;
      case INTERIOR_KERNEL:
      case KERNEL_POLICY:
        // twisted-mass flops are done in the interior kernel
        flops += twisted_flops * in->VolumeCB();
        break;
      }
      return flops;
    }
  };
#endif // GPU_NDEG_TWISTED_MASS_DIRAC

#include <dslash_policy.cuh>

  void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
                                 const cudaColorSpinorField *in, const int parity, const int dagger,
                                 const cudaColorSpinorField *x, const QudaTwistDslashType type,
                                 const double &kappa, const double &mu, const double &epsilon,
                                 const double &k, const int *commOverride, TimeProfile &profile)
  {
    inSpinor = (cudaColorSpinorField*)in; // EVIL
    inSpinor->createComms(1);

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
    int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code

    int ghost_threads[4] = {0};
    int bulk_threads = in->Volume() / 2;

    for (int i=0; i<4; i++) {
      dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
      dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
      dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
      dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
      dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
      dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
      ghost_threads[i] = in->GhostFace()[i] / 2;
    }

    void *gauge0, *gauge1;
    bindGaugeTex(gauge, parity, &gauge0, &gauge1);

    if (in->Precision() != gauge.Precision())
      errorQuda("Mixing gauge and spinor precision not supported");

    DslashCuda *dslash = 0;
    size_t regSize = sizeof(float);

    if (in->Precision() == QUDA_DOUBLE_PRECISION) {
      dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
                                                          gauge.Reconstruct(), in, x, type,
                                                          kappa, mu, epsilon, k, dagger);
      regSize = sizeof(double);
    } else if (in->Precision() == QUDA_SINGLE_PRECISION) {
      dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
                                                        gauge.Reconstruct(), in, x, type,
                                                        kappa, mu, epsilon, k, dagger);
    } else if (in->Precision() == QUDA_HALF_PRECISION) {
      dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
                                                        gauge.Reconstruct(), in, x, type,
                                                        kappa, mu, epsilon, k, dagger);
    }

    DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize,
                                   parity, dagger, bulk_threads, ghost_threads, profile);
    dslash_policy.apply(0);

    delete dslash;
    unbindGaugeTex(gauge);

    checkCudaError();
#else
    errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
  }

}
b12bb77f890447b19458e48d2ca052249cdcc43b.cu
#include <cstdlib>
#include <cstdio>
#include <string>
#include <iostream>

#include <color_spinor_field.h>
#include <clover_field.h>

// these control the Wilson-type actions
#ifdef GPU_WILSON_DIRAC
//#define DIRECT_ACCESS_LINK
//#define DIRECT_ACCESS_WILSON_SPINOR
//#define DIRECT_ACCESS_WILSON_ACCUM
//#define DIRECT_ACCESS_WILSON_INTER
//#define DIRECT_ACCESS_WILSON_PACK_SPINOR
//#define DIRECT_ACCESS_CLOVER
#endif // GPU_WILSON_DIRAC

#include <quda_internal.h>
#include <dslash_quda.h>
#include <sys/time.h>
#include <blas_quda.h>
#include <face_quda.h>
#include <inline_ptx.h>

namespace quda {

  namespace ndegtwisted {

#include <dslash_constants.h>
#include <dslash_textures.h>
#include <dslash_index.cuh>

    // Enable shared memory dslash for Fermi architecture
    //#define SHARED_WILSON_DSLASH
    //#define SHARED_8_BYTE_WORD_SIZE // 8-byte shared memory access

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
#include <tm_ndeg_dslash_def.h> // Non-degenerate twisted Mass
#endif

#ifndef NDEGTM_SHARED_FLOATS_PER_THREAD
#define NDEGTM_SHARED_FLOATS_PER_THREAD 0
#endif

#include <dslash_quda.cuh>

  } // end namespace twisted

  // declare the dslash events
#include <dslash_events.cuh>

  using namespace ndegtwisted;

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
  template <typename sFloat, typename gFloat>
  class NdegTwistedDslashCuda : public SharedDslashCuda {

  private:
    const gFloat *gauge0, *gauge1;
    const QudaTwistDslashType dslashType;
    double a, b, c, d;

  protected:
    unsigned int sharedBytesPerThread() const
    {
      if (dslashParam.kernel_type == INTERIOR_KERNEL) {
        int reg_size = (typeid(sFloat)==typeid(double2) ? sizeof(double) : sizeof(float));
        return NDEGTM_SHARED_FLOATS_PER_THREAD * reg_size;
      } else {
        return 0;
      }
    }

  public:
    NdegTwistedDslashCuda(cudaColorSpinorField *out, const gFloat *gauge0, const gFloat *gauge1,
                          const QudaReconstructType reconstruct, const cudaColorSpinorField *in,
                          const cudaColorSpinorField *x, const QudaTwistDslashType dslashType,
                          const double kappa, const double mu, const double epsilon,
                          const double k, const int dagger)
      : SharedDslashCuda(out, in, x, reconstruct, dagger),
        gauge0(gauge0), gauge1(gauge1), dslashType(dslashType)
    {
      bindSpinorTex<sFloat>(in, out, x);
      a = kappa;
      b = mu;
      c = epsilon;
      d = k;
      dslashParam.gauge0 = (void*)gauge0;
      dslashParam.gauge1 = (void*)gauge1;
      dslashParam.a = kappa;
      dslashParam.a_f = kappa;
      dslashParam.b = mu;
      dslashParam.b_f = mu;
      dslashParam.c = epsilon;
      dslashParam.c_f = epsilon;
      dslashParam.d = k;
      dslashParam.d_f = k;

      if (dslashType != QUDA_NONDEG_DSLASH)
        errorQuda("Invalid dslashType for non-degenerate twisted-mass Dslash");

      dslashParam.fl_stride = in->VolumeCB()/2;
    }

    virtual ~NdegTwistedDslashCuda() { unbindSpinorTex<sFloat>(in, out, x); }

    TuneKey tuneKey() const
    {
      TuneKey key = DslashCuda::tuneKey();
      strcat(key.aux, ",NdegDslash");
      return key;
    }

    void apply(const cudaStream_t &stream)
    {
      // factor of 2 (or 1) for T-dimensional spin projection (FIXME - unnecessary)
      dslashParam.tProjScale = getKernelPackT() ? 1.0 : 2.0;
      dslashParam.tProjScale_f = (float)(dslashParam.tProjScale);

#ifdef SHARED_WILSON_DSLASH
      if (dslashParam.kernel_type == EXTERIOR_KERNEL_X)
        errorQuda("Shared dslash does not yet support X-dimension partitioning");
#endif
      TuneParam tp = tuneLaunch(*this, getTuning(), getVerbosity());
      NDEG_TM_DSLASH(twistedNdegMassDslash, tp.grid, tp.block, tp.shared_bytes, stream, dslashParam);
    }

    long long flops() const
    {
      int twisted_flops = 48;
      long long flops = DslashCuda::flops();
      switch (dslashParam.kernel_type) {
      case EXTERIOR_KERNEL_X:
      case EXTERIOR_KERNEL_Y:
      case EXTERIOR_KERNEL_Z:
      case EXTERIOR_KERNEL_T:
      case EXTERIOR_KERNEL_ALL:
        break;
      case INTERIOR_KERNEL:
      case KERNEL_POLICY:
        // twisted-mass flops are done in the interior kernel
        flops += twisted_flops * in->VolumeCB();
        break;
      }
      return flops;
    }
  };
#endif // GPU_NDEG_TWISTED_MASS_DIRAC

#include <dslash_policy.cuh>

  void ndegTwistedMassDslashCuda(cudaColorSpinorField *out, const cudaGaugeField &gauge,
                                 const cudaColorSpinorField *in, const int parity, const int dagger,
                                 const cudaColorSpinorField *x, const QudaTwistDslashType type,
                                 const double &kappa, const double &mu, const double &epsilon,
                                 const double &k, const int *commOverride, TimeProfile &profile)
  {
    inSpinor = (cudaColorSpinorField*)in; // EVIL
    inSpinor->createComms(1);

#ifdef GPU_NDEG_TWISTED_MASS_DIRAC
    int Npad = (in->Ncolor()*in->Nspin()*2)/in->FieldOrder(); // SPINOR_HOP in old code

    int ghost_threads[4] = {0};
    int bulk_threads = in->Volume() / 2;

    for (int i=0; i<4; i++) {
      dslashParam.ghostDim[i] = commDimPartitioned(i); // determines whether to use regular or ghost indexing at boundary
      dslashParam.ghostOffset[i][0] = in->GhostOffset(i,0)/in->FieldOrder();
      dslashParam.ghostOffset[i][1] = in->GhostOffset(i,1)/in->FieldOrder();
      dslashParam.ghostNormOffset[i][0] = in->GhostNormOffset(i,0);
      dslashParam.ghostNormOffset[i][1] = in->GhostNormOffset(i,1);
      dslashParam.commDim[i] = (!commOverride[i]) ? 0 : commDimPartitioned(i); // switch off comms if override = 0
      ghost_threads[i] = in->GhostFace()[i] / 2;
    }

    void *gauge0, *gauge1;
    bindGaugeTex(gauge, parity, &gauge0, &gauge1);

    if (in->Precision() != gauge.Precision())
      errorQuda("Mixing gauge and spinor precision not supported");

    DslashCuda *dslash = 0;
    size_t regSize = sizeof(float);

    if (in->Precision() == QUDA_DOUBLE_PRECISION) {
      dslash = new NdegTwistedDslashCuda<double2,double2>(out, (double2*)gauge0, (double2*)gauge1,
                                                          gauge.Reconstruct(), in, x, type,
                                                          kappa, mu, epsilon, k, dagger);
      regSize = sizeof(double);
    } else if (in->Precision() == QUDA_SINGLE_PRECISION) {
      dslash = new NdegTwistedDslashCuda<float4,float4>(out, (float4*)gauge0, (float4*)gauge1,
                                                        gauge.Reconstruct(), in, x, type,
                                                        kappa, mu, epsilon, k, dagger);
    } else if (in->Precision() == QUDA_HALF_PRECISION) {
      dslash = new NdegTwistedDslashCuda<short4,short4>(out, (short4*)gauge0, (short4*)gauge1,
                                                        gauge.Reconstruct(), in, x, type,
                                                        kappa, mu, epsilon, k, dagger);
    }

    DslashPolicyTune dslash_policy(*dslash, const_cast<cudaColorSpinorField*>(in), regSize,
                                   parity, dagger, bulk_threads, ghost_threads, profile);
    dslash_policy.apply(0);

    delete dslash;
    unbindGaugeTex(gauge);

    checkCudaError();
#else
    errorQuda("Non-degenerate twisted mass dslash has not been built");
#endif
  }

}
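
// --------------------------------------------------------------------------
// Illustrative call-site sketch (not part of the original file). The field
// and parameter names below are hypothetical; in QUDA this entry point is
// normally reached through the Dirac operator classes rather than called
// directly.
//
//   int commOverride[4] = {1, 1, 1, 1}; // keep halo exchange on in all dims
//   ndegTwistedMassDslashCuda(&out, gauge, &in, parity, dagger, &x,
//                             QUDA_NONDEG_DSLASH, kappa, mu, epsilon, k,
//                             commOverride, profile);
//
// Any other QudaTwistDslashType makes the NdegTwistedDslashCuda constructor
// call errorQuda(), so QUDA_NONDEG_DSLASH is the only valid type here.
// --------------------------------------------------------------------------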